Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 2
-rw-r--r--  drivers/scsi/3w-xxxx.c | 2
-rw-r--r--  drivers/scsi/53c700.c | 57
-rw-r--r--  drivers/scsi/53c700.h | 34
-rw-r--r--  drivers/scsi/53c7xx.c | 13
-rw-r--r--  drivers/scsi/BusLogic.c | 64
-rw-r--r--  drivers/scsi/BusLogic.h | 1
-rw-r--r--  drivers/scsi/FlashPoint.c | 1
-rw-r--r--  drivers/scsi/Kconfig | 174
-rw-r--r--  drivers/scsi/Makefile | 22
-rw-r--r--  drivers/scsi/NCR5380.c | 4
-rw-r--r--  drivers/scsi/NCR53C9x.c | 19
-rw-r--r--  drivers/scsi/NCR53C9x.h | 1
-rw-r--r--  drivers/scsi/NCR_D700.c | 16
-rw-r--r--  drivers/scsi/NCR_Q720.c | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 2
-rw-r--r--  drivers/scsi/a2091.c | 8
-rw-r--r--  drivers/scsi/a2091.h | 4
-rw-r--r--  drivers/scsi/a3000.c | 10
-rw-r--r--  drivers/scsi/a3000.h | 4
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 60
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 20
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 25
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 36
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 280
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 10
-rw-r--r--  drivers/scsi/aacraid/linit.c | 35
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 446
-rw-r--r--  drivers/scsi/aacraid/rx.c | 119
-rw-r--r--  drivers/scsi/aacraid/sa.c | 23
-rw-r--r--  drivers/scsi/advansys.c | 117
-rw-r--r--  drivers/scsi/aha152x.c | 100
-rw-r--r--  drivers/scsi/aha1542.c | 1
-rw-r--r--  drivers/scsi/aha1740.c | 3
-rw-r--r--  drivers/scsi/ahci.c | 1472
-rw-r--r--  drivers/scsi/aic7xxx/aic7770_osm.c | 5
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 26
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 87
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 13
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 19
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 24
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/Makefile | 2
-rw-r--r--  drivers/scsi/aic7xxx_old.c | 15
-rw-r--r--  drivers/scsi/aic7xxx_old/aic7xxx_proc.c | 1
-rw-r--r--  drivers/scsi/aic94xx/Kconfig | 41
-rw-r--r--  drivers/scsi/aic94xx/Makefile | 39
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 114
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dev.c | 353
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dump.c | 959
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dump.h | 52
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 1376
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.h | 397
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 866
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_reg.c | 332
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_reg.h | 302
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_reg_def.h | 2398
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sas.h | 785
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 758
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 1089
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_seq.c | 1404
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_seq.h | 70
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 642
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 636
-rw-r--r--  drivers/scsi/amiga7xx.c | 1
-rw-r--r--  drivers/scsi/arcmsr/Makefile | 6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 472
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 381
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 1496
-rw-r--r--  drivers/scsi/arm/Kconfig | 3
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 3
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 2
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 2
-rw-r--r--  drivers/scsi/arm/scsi.h | 2
-rw-r--r--  drivers/scsi/ata_piix.c | 904
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 3
-rw-r--r--  drivers/scsi/atari_scsi.c | 1
-rw-r--r--  drivers/scsi/atp870u.c | 163
-rw-r--r--  drivers/scsi/blz1230.c | 2
-rw-r--r--  drivers/scsi/blz2060.c | 2
-rw-r--r--  drivers/scsi/ch.c | 1
-rw-r--r--  drivers/scsi/constants.c | 127
-rw-r--r--  drivers/scsi/cyberstorm.c | 2
-rw-r--r--  drivers/scsi/cyberstormII.c | 2
-rw-r--r--  drivers/scsi/dc395x.c | 4
-rw-r--r--  drivers/scsi/dec_esp.c | 10
-rw-r--r--  drivers/scsi/dmx3191d.c | 2
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 1
-rw-r--r--  drivers/scsi/dpt_i2o.c | 9
-rw-r--r--  drivers/scsi/dtc.c | 2
-rw-r--r--  drivers/scsi/eata.c | 3
-rw-r--r--  drivers/scsi/eata_generic.h | 1
-rw-r--r--  drivers/scsi/eata_pio.c | 132
-rw-r--r--  drivers/scsi/esp.c | 335
-rw-r--r--  drivers/scsi/esp.h | 4
-rw-r--r--  drivers/scsi/fastlane.c | 2
-rw-r--r--  drivers/scsi/fcal.c | 4
-rw-r--r--  drivers/scsi/fd_mcs.c | 2
-rw-r--r--  drivers/scsi/fdomain.c | 3
-rw-r--r--  drivers/scsi/g_NCR5380.c | 6
-rw-r--r--  drivers/scsi/g_NCR5380.h | 1
-rw-r--r--  drivers/scsi/gdth.c | 6
-rw-r--r--  drivers/scsi/gvp11.c | 10
-rw-r--r--  drivers/scsi/gvp11.h | 4
-rw-r--r--  drivers/scsi/hosts.c | 7
-rw-r--r--  drivers/scsi/hptiop.c | 571
-rw-r--r--  drivers/scsi/ibmmca.c | 15
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 64
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 17
-rw-r--r--  drivers/scsi/ide-scsi.c | 3
-rw-r--r--  drivers/scsi/imm.c | 4
-rw-r--r--  drivers/scsi/imm.h | 2
-rw-r--r--  drivers/scsi/in2000.c | 2
-rw-r--r--  drivers/scsi/initio.c | 3
-rw-r--r--  drivers/scsi/initio.h | 13
-rw-r--r--  drivers/scsi/ipr.c | 37
-rw-r--r--  drivers/scsi/ipr.h | 82
-rw-r--r--  drivers/scsi/ips.c | 9
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 1134
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 46
-rw-r--r--  drivers/scsi/jazz_esp.c | 4
-rw-r--r--  drivers/scsi/lasi700.c | 2
-rw-r--r--  drivers/scsi/libata-bmdma.c | 1150
-rw-r--r--  drivers/scsi/libata-core.c | 5916
-rw-r--r--  drivers/scsi/libata-eh.c | 1907
-rw-r--r--  drivers/scsi/libata-scsi.c | 3052
-rw-r--r--  drivers/scsi/libata.h | 115
-rw-r--r--  drivers/scsi/libiscsi.c | 533
-rw-r--r--  drivers/scsi/libsas/Kconfig | 39
-rw-r--r--  drivers/scsi/libsas/Makefile | 36
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 749
-rw-r--r--  drivers/scsi/libsas/sas_dump.c | 76
-rw-r--r--  drivers/scsi/libsas/sas_dump.h | 42
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 75
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 1855
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 267
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 146
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 158
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 279
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 786
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 475
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 38
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 97
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 205
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 87
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 37
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 115
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 112
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 4
-rw-r--r--  drivers/scsi/mac_esp.c | 7
-rw-r--r--  drivers/scsi/mac_scsi.c | 7
-rw-r--r--  drivers/scsi/megaraid.c | 11
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 62
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 52
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 21
-rw-r--r--  drivers/scsi/mesh.c | 23
-rw-r--r--  drivers/scsi/mvme147.c | 6
-rw-r--r--  drivers/scsi/mvme147.h | 4
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 3
-rw-r--r--  drivers/scsi/ncr53c8xx.h | 2
-rw-r--r--  drivers/scsi/nsp32.c | 12
-rw-r--r--  drivers/scsi/oktagon_esp.c | 3
-rw-r--r--  drivers/scsi/oktagon_io.S | 1
-rw-r--r--  drivers/scsi/osst.h | 1
-rw-r--r--  drivers/scsi/pas16.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 2
-rw-r--r--  drivers/scsi/pdc_adma.c | 739
-rw-r--r--  drivers/scsi/pluto.c | 3
-rw-r--r--  drivers/scsi/ppa.c | 3
-rw-r--r--  drivers/scsi/ppa.h | 2
-rw-r--r--  drivers/scsi/qla1280.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 120
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 925
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 151
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_devtbl.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 157
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 280
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 142
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qlogicpti.c | 367
-rw-r--r--  drivers/scsi/sata_mv.c | 2468
-rw-r--r--  drivers/scsi/sata_nv.c | 596
-rw-r--r--  drivers/scsi/sata_promise.c | 837
-rw-r--r--  drivers/scsi/sata_promise.h | 157
-rw-r--r--  drivers/scsi/sata_qstor.c | 730
-rw-r--r--  drivers/scsi/sata_sil.c | 683
-rw-r--r--  drivers/scsi/sata_sil24.c | 1181
-rw-r--r--  drivers/scsi/sata_sis.c | 348
-rw-r--r--  drivers/scsi/sata_svw.c | 509
-rw-r--r--  drivers/scsi/sata_sx4.c | 1502
-rw-r--r--  drivers/scsi/sata_uli.c | 301
-rw-r--r--  drivers/scsi/sata_via.c | 394
-rw-r--r--  drivers/scsi/sata_vsc.c | 486
-rw-r--r--  drivers/scsi/scsi.c | 61
-rw-r--r--  drivers/scsi/scsi.h | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 1456
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 242
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 220
-rw-r--r--  drivers/scsi/scsi_logging.h | 1
-rw-r--r--  drivers/scsi/scsi_netlink.c | 199
-rw-r--r--  drivers/scsi/scsi_priv.h | 15
-rw-r--r--  drivers/scsi/scsi_proc.c | 4
-rw-r--r--  drivers/scsi/scsi_sas_internal.h | 10
-rw-r--r--  drivers/scsi/scsi_scan.c | 148
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 408
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 663
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 510
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 31
-rw-r--r--  drivers/scsi/scsicam.c | 3
-rw-r--r--  drivers/scsi/sd.c | 175
-rw-r--r--  drivers/scsi/seagate.c | 4
-rw-r--r--  drivers/scsi/sg.c | 19
-rw-r--r--  drivers/scsi/sgiwd93.c | 8
-rw-r--r--  drivers/scsi/sim710.c | 4
-rw-r--r--  drivers/scsi/sr.c | 7
-rw-r--r--  drivers/scsi/sr_vendor.c | 1
-rw-r--r--  drivers/scsi/st.c | 10
-rw-r--r--  drivers/scsi/st.h | 1
-rw-r--r--  drivers/scsi/stex.c | 1252
-rw-r--r--  drivers/scsi/sun3_NCR5380.c | 3
-rw-r--r--  drivers/scsi/sun3x_esp.c | 12
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym53c8xx.h | 1
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.h | 1
-rw-r--r--  drivers/scsi/t128.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 3
-rw-r--r--  drivers/scsi/tmscsim.h | 1
-rw-r--r--  drivers/scsi/u14-34f.c | 3
-rw-r--r--  drivers/scsi/ultrastor.c | 23
-rw-r--r--  drivers/scsi/ultrastor.h | 12
-rw-r--r--  drivers/scsi/wd33c93.c | 3
-rw-r--r--  drivers/scsi/wd33c93.h | 1
-rw-r--r--  drivers/scsi/wd7000.c | 4
-rw-r--r--  drivers/scsi/zalon.c | 2
261 files changed, 28159 insertions, 31373 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b003baf8d404..5a9475e56d0e 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2122,7 +2122,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 					       TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
 
 	/* Now setup the interrupt handler */
-	retval = request_irq(pdev->irq, twa_interrupt, SA_SHIRQ, "3w-9xxx", tw_dev);
+	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
 	if (retval) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
 		goto out_remove_host;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 17dbd4ac8692..f3a5f422a8e4 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2397,7 +2397,7 @@ static int __devinit tw_probe(struct pci_dev *pdev, const struct pci_device_id *
 	printk(KERN_WARNING "3w-xxxx: scsi%d: Found a 3ware Storage Controller at 0x%x, IRQ: %d.\n", host->host_no, tw_dev->base_addr, pdev->irq);
 
 	/* Now setup the interrupt handler */
-	retval = request_irq(pdev->irq, tw_interrupt, SA_SHIRQ, "3w-xxxx", tw_dev);
+	retval = request_irq(pdev->irq, tw_interrupt, IRQF_SHARED, "3w-xxxx", tw_dev);
 	if (retval) {
 		printk(KERN_WARNING "3w-xxxx: Error requesting IRQ.");
 		goto out_remove_host;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3c683dc23541..657a3ab75399 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -114,7 +114,6 @@
  * */
 #define NCR_700_VERSION "2.8"
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -174,6 +173,7 @@ STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
+STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
@@ -183,10 +183,6 @@ STATIC struct device_attribute *NCR_700_dev_attrs[];
 
 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
 
-struct NCR_700_sense {
-	unsigned char cmnd[MAX_COMMAND_SIZE];
-};
-
 static char *NCR_700_phase[] = {
 	"",
 	"after selection",
@@ -334,6 +330,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	tpnt->use_clustering = ENABLE_CLUSTERING;
 	tpnt->slave_configure = NCR_700_slave_configure;
 	tpnt->slave_destroy = NCR_700_slave_destroy;
+	tpnt->slave_alloc = NCR_700_slave_alloc;
 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
 	tpnt->change_queue_type = NCR_700_change_queue_type;
 
@@ -612,9 +609,10 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 	struct NCR_700_command_slot *slot =
 		(struct NCR_700_command_slot *)SCp->host_scribble;
 
-	NCR_700_unmap(hostdata, SCp, slot);
+	dma_unmap_single(hostdata->dev, slot->pCmd,
+			 sizeof(SCp->cmnd), DMA_TO_DEVICE);
 	if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
-		struct NCR_700_sense *sense = SCp->device->hostdata;
+		char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
 #ifdef NCR_700_DEBUG
 		printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
 		       SCp, SCp->cmnd[7], result);
@@ -625,10 +623,9 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 		/* restore the old result if the request sense was
 		 * successful */
 		if(result == 0)
-			result = sense->cmnd[7];
+			result = cmnd[7];
 	} else
-		dma_unmap_single(hostdata->dev, slot->pCmd,
-				 sizeof(SCp->cmnd), DMA_TO_DEVICE);
+		NCR_700_unmap(hostdata, SCp, slot);
 
 	free_slot(slot, hostdata);
 #ifdef NCR_700_DEBUG
@@ -970,14 +967,15 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
 			struct NCR_700_command_slot *slot =
 				(struct NCR_700_command_slot *)SCp->host_scribble;
-			if(SCp->cmnd[0] == REQUEST_SENSE) {
+			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
 				/* OOPS: bad device, returning another
 				 * contingent allegiance condition */
 				scmd_printk(KERN_ERR, SCp,
 					"broken device is looping in contingent allegiance: ignoring\n");
 				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
 			} else {
-				struct NCR_700_sense *sense = SCp->device->hostdata;
+				char *cmnd =
+					NCR_700_get_sense_cmnd(SCp->device);
 #ifdef NCR_DEBUG
 				scsi_print_command(SCp);
 				printk(" cmd %p has status %d, requesting sense\n",
@@ -995,21 +993,21 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 						 sizeof(SCp->cmnd),
 						 DMA_TO_DEVICE);
 
-				sense->cmnd[0] = REQUEST_SENSE;
-				sense->cmnd[1] = (SCp->device->lun & 0x7) << 5;
-				sense->cmnd[2] = 0;
-				sense->cmnd[3] = 0;
-				sense->cmnd[4] = sizeof(SCp->sense_buffer);
-				sense->cmnd[5] = 0;
+				cmnd[0] = REQUEST_SENSE;
+				cmnd[1] = (SCp->device->lun & 0x7) << 5;
+				cmnd[2] = 0;
+				cmnd[3] = 0;
+				cmnd[4] = sizeof(SCp->sense_buffer);
+				cmnd[5] = 0;
 				/* Here's a quiet hack: the
 				 * REQUEST_SENSE command is six bytes,
 				 * so store a flag indicating that
 				 * this was an internal sense request
 				 * and the original status at the end
 				 * of the command */
-				sense->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
-				sense->cmnd[7] = hostdata->status[0];
-				slot->pCmd = dma_map_single(hostdata->dev, sense->cmnd, sizeof(sense->cmnd), DMA_TO_DEVICE);
+				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
+				cmnd[7] = hostdata->status[0];
+				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
 				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
 				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
@@ -1531,7 +1529,7 @@ NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
 
 	/* clear all the negotiated parameters */
 	__shost_for_each_device(SDp, host)
-		SDp->hostdata = NULL;
+		NCR_700_clear_flag(SDp, ~0);
 
 	/* clear all the slots and their pending commands */
 	for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
@@ -2036,7 +2034,17 @@ NCR_700_set_offset(struct scsi_target *STp, int offset)
 		spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
 }
 
+STATIC int
+NCR_700_slave_alloc(struct scsi_device *SDp)
+{
+	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
+				GFP_KERNEL);
 
+	if (!SDp->hostdata)
+		return -ENOMEM;
+
+	return 0;
+}
 
 STATIC int
 NCR_700_slave_configure(struct scsi_device *SDp)
@@ -2044,11 +2052,6 @@ NCR_700_slave_configure(struct scsi_device *SDp)
 	struct NCR_700_Host_Parameters *hostdata = 
 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
 
-	SDp->hostdata = kmalloc(GFP_KERNEL, sizeof(struct NCR_700_sense));
-
-	if (!SDp->hostdata)
-		return -ENOMEM;
-
 	/* to do here: allocate memory; build a queue_full list */
 	if(SDp->tagged_supported) {
 		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 7f22a06fe5ec..97ebe71b701b 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -12,7 +12,7 @@
 #include <asm/io.h>
 
 #include <scsi/scsi_device.h>
-
+#include <scsi/scsi_cmnd.h>
 
 /* Turn on for general debugging---too verbose for normal use */
 #undef NCR_700_DEBUG
@@ -76,11 +76,16 @@ struct NCR_700_SG_List {
  #define SCRIPT_RETURN			0x90080000
 };
 
-/* We use device->hostdata to store negotiated parameters.  This is
- * supposed to be a pointer to a device private area, but we cannot
- * really use it as such since it will never be freed, so just use the
- * 32 bits to cram the information.  The SYNC negotiation sequence looks
- * like:
+struct NCR_700_Device_Parameters {
+	/* space for creating a request sense command. Really, except
+	 * for the annoying SCSI-2 requirement for LUN information in
+	 * cmnd[1], this could be in static storage */
+	unsigned char cmnd[MAX_COMMAND_SIZE];
+	__u8 depth;
+};
+
+
+/* The SYNC negotiation sequence looks like:
  *
  * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
  * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
@@ -98,19 +103,26 @@ struct NCR_700_SG_List {
 #define NCR_700_DEV_BEGIN_SYNC_NEGOTIATION	(1<<17)
 #define NCR_700_DEV_PRINT_SYNC_NEGOTIATION (1<<19)
 
+static inline char *NCR_700_get_sense_cmnd(struct scsi_device *SDp)
+{
+	struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+	return hostdata->cmnd;
+}
+
 static inline void
 NCR_700_set_depth(struct scsi_device *SDp, __u8 depth)
 {
-	long l = (long)SDp->hostdata;
+	struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
 
-	l &= 0xffff00ff;
-	l |= 0xff00 & (depth << 8);
-	SDp->hostdata = (void *)l;
+	hostdata->depth = depth;
 }
 static inline __u8
 NCR_700_get_depth(struct scsi_device *SDp)
 {
-	return ((((unsigned long)SDp->hostdata) & 0xff00)>>8);
+	struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+	return hostdata->depth;
 }
 static inline int
 NCR_700_is_flag_set(struct scsi_device *SDp, __u32 flag)
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
index 765769a629e4..acf292736b4e 100644
--- a/drivers/scsi/53c7xx.c
+++ b/drivers/scsi/53c7xx.c
@@ -232,7 +232,6 @@
 
 #include <linux/module.h>
 
-#include <linux/config.h>
 
 #include <linux/types.h>
 #include <asm/setup.h>
@@ -1071,7 +1070,7 @@ NCR53c7x0_init (struct Scsi_Host *host) {
 
     NCR53c7x0_driver_init (host);
 
-    if (request_irq(host->irq, NCR53c7x0_intr, SA_SHIRQ, "53c7xx", host))
+    if (request_irq(host->irq, NCR53c7x0_intr, IRQF_SHARED, "53c7xx", host))
     {
 	printk("scsi%d : IRQ%d not free, detaching\n",
 	       host->host_no, host->irq);
@@ -3452,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) {
 	for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
 	    cmd_dataout += 4, ++i) {
 	    u32 vbuf = cmd->use_sg
-		? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+
-		    ((struct scatterlist *)cmd->buffer)[i].offset
+		? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
+		    ((struct scatterlist *)cmd->request_buffer)[i].offset
 		: (u32)(cmd->request_buffer);
 	    u32 bbuf = virt_to_bus((void *)vbuf);
 	    u32 count = cmd->use_sg ?
-		((struct scatterlist *)cmd->buffer)[i].length :
+		((struct scatterlist *)cmd->request_buffer)[i].length :
 		cmd->request_bufflen;
 
 	    /*
@@ -4233,7 +4232,7 @@ restart:
  * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
  *	the same IRQ line.
  *
- * Inputs : Since we're using the SA_INTERRUPT interrupt handler
+ * Inputs : Since we're using the IRQF_DISABLED interrupt handler
  *	semantics, irq indicates the interrupt which invoked
  *	this handler.
  *
@@ -5418,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
 
     if ((buffers = cmd->use_sg)) {
 	for (offset = 0,
-	     segment = (struct scatterlist *) cmd->buffer;
+	     segment = (struct scatterlist *) cmd->request_buffer;
 	     buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
 	     (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
 	     --buffers, offset += segment->length, ++segment)
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index bde3d5834ade..4ea49fd7965e 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -29,7 +29,6 @@
 #define BusLogic_DriverVersion		"2.1.16"
 #define BusLogic_DriverDate		"18 July 2002"
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -663,7 +662,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
 	   particular standard ISA I/O Address need not be probed.
 	 */
 	PrimaryProbeInfo->IO_Address = 0;
-	while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) {
+	while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) {
 		struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter;
 		struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation;
 		enum BusLogic_ISACompatibleIOPort ModifyIOAddressRequest;
@@ -763,7 +762,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
 			PrimaryProbeInfo->Bus = Bus;
 			PrimaryProbeInfo->Device = Device;
 			PrimaryProbeInfo->IRQ_Channel = IRQ_Channel;
-			PrimaryProbeInfo->PCI_Device = PCI_Device;
+			PrimaryProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
 			PCIMultiMasterCount++;
 		} else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) {
 			struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
@@ -774,7 +773,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
 			ProbeInfo->Bus = Bus;
 			ProbeInfo->Device = Device;
 			ProbeInfo->IRQ_Channel = IRQ_Channel;
-			ProbeInfo->PCI_Device = PCI_Device;
+			ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
 			NonPrimaryPCIMultiMasterCount++;
 			PCIMultiMasterCount++;
 		} else
@@ -824,7 +823,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
 	   noting the PCI bus location and assigned IRQ Channel.
 	 */
 	PCI_Device = NULL;
-	while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) {
+	while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) {
 		unsigned char Bus;
 		unsigned char Device;
 		unsigned int IRQ_Channel;
@@ -851,7 +850,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
 			ProbeInfo->Bus = Bus;
 			ProbeInfo->Device = Device;
 			ProbeInfo->IRQ_Channel = IRQ_Channel;
-			ProbeInfo->PCI_Device = PCI_Device;
+			ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
 			break;
 		}
 	}
@@ -875,7 +874,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
 	/*
 	   Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
 	 */
-	while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) {
+	while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) {
 		unsigned char Bus;
 		unsigned char Device;
 		unsigned int IRQ_Channel;
@@ -924,7 +923,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
 			ProbeInfo->Bus = Bus;
 			ProbeInfo->Device = Device;
 			ProbeInfo->IRQ_Channel = IRQ_Channel;
-			ProbeInfo->PCI_Device = PCI_Device;
+			ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
 			FlashPointCount++;
 		} else
 			BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL);
@@ -1845,7 +1844,7 @@ static boolean __init BusLogic_AcquireResources(struct BusLogic_HostAdapter *Hos
 	/*
 	   Acquire shared access to the IRQ Channel.
 	 */
-	if (request_irq(HostAdapter->IRQ_Channel, BusLogic_InterruptHandler, SA_SHIRQ, HostAdapter->FullModelName, HostAdapter) < 0) {
+	if (request_irq(HostAdapter->IRQ_Channel, BusLogic_InterruptHandler, IRQF_SHARED, HostAdapter->FullModelName, HostAdapter) < 0) {
 		BusLogic_Error("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n", HostAdapter, HostAdapter->IRQ_Channel);
 		return false;
 	}
@@ -1891,6 +1890,7 @@ static void BusLogic_ReleaseResources(struct BusLogic_HostAdapter *HostAdapter)
 	 */
 	if (HostAdapter->MailboxSpace)
 		pci_free_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, HostAdapter->MailboxSpace, HostAdapter->MailboxSpaceHandle);
+	pci_dev_put(HostAdapter->PCI_Device);
 	HostAdapter->MailboxSpace = NULL;
 	HostAdapter->MailboxSpaceHandle = 0;
 	HostAdapter->MailboxSize = 0;
@@ -2177,6 +2177,7 @@ static int __init BusLogic_init(void)
 {
 	int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex;
 	struct BusLogic_HostAdapter *PrototypeHostAdapter;
+	int ret = 0;
 
 #ifdef MODULE
 	if (BusLogic)
@@ -2283,25 +2284,49 @@ static int __init BusLogic_init(void)
 		   perform Target Device Inquiry.
 		 */
 		if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
-		    BusLogic_ReportHostAdapterConfiguration(HostAdapter) && BusLogic_AcquireResources(HostAdapter) && BusLogic_CreateInitialCCBs(HostAdapter) && BusLogic_InitializeHostAdapter(HostAdapter) && BusLogic_TargetDeviceInquiry(HostAdapter)) {
+		    BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
+		    BusLogic_AcquireResources(HostAdapter) &&
+		    BusLogic_CreateInitialCCBs(HostAdapter) &&
+		    BusLogic_InitializeHostAdapter(HostAdapter) &&
+		    BusLogic_TargetDeviceInquiry(HostAdapter)) {
 			/*
 			   Initialization has been completed successfully.  Release and
 			   re-register usage of the I/O Address range so that the Model
 			   Name of the Host Adapter will appear, and initialize the SCSI
 			   Host structure.
 			 */
-			release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
-			if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, HostAdapter->FullModelName)) {
-				printk(KERN_WARNING "BusLogic: Release and re-register of " "port 0x%04lx failed \n", (unsigned long) HostAdapter->IO_Address);
+			release_region(HostAdapter->IO_Address,
+				       HostAdapter->AddressCount);
+			if (!request_region(HostAdapter->IO_Address,
+					    HostAdapter->AddressCount,
+					    HostAdapter->FullModelName)) {
+				printk(KERN_WARNING
+					"BusLogic: Release and re-register of "
+					"port 0x%04lx failed \n",
+					(unsigned long)HostAdapter->IO_Address);
 				BusLogic_DestroyCCBs(HostAdapter);
 				BusLogic_ReleaseResources(HostAdapter);
 				list_del(&HostAdapter->host_list);
 				scsi_host_put(Host);
+				ret = -ENOMEM;
 			} else {
-				BusLogic_InitializeHostStructure(HostAdapter, Host);
-				scsi_add_host(Host, HostAdapter->PCI_Device ? &HostAdapter->PCI_Device->dev : NULL);
-				scsi_scan_host(Host);
-				BusLogicHostAdapterCount++;
+				BusLogic_InitializeHostStructure(HostAdapter,
+								 Host);
+				if (scsi_add_host(Host, HostAdapter->PCI_Device
+						? &HostAdapter->PCI_Device->dev
+						: NULL)) {
+					printk(KERN_WARNING
+						"BusLogic: scsi_add_host()"
+						"failed!\n");
+					BusLogic_DestroyCCBs(HostAdapter);
+					BusLogic_ReleaseResources(HostAdapter);
+					list_del(&HostAdapter->host_list);
+					scsi_host_put(Host);
+					ret = -ENODEV;
+				} else {
+					scsi_scan_host(Host);
+					BusLogicHostAdapterCount++;
+				}
 			}
 		} else {
 			/*
@@ -2316,12 +2341,13 @@ static int __init BusLogic_init(void)
 			BusLogic_ReleaseResources(HostAdapter);
 			list_del(&HostAdapter->host_list);
 			scsi_host_put(Host);
+			ret = -ENODEV;
 		}
 	}
 	kfree(PrototypeHostAdapter);
 	kfree(BusLogic_ProbeInfoList);
 	BusLogic_ProbeInfoList = NULL;
-	return 0;
+	return ret;
 }
 
 
@@ -2955,6 +2981,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
 }
 
 
+#if 0
 /*
   BusLogic_AbortCommand aborts Command if possible.
 */
@@ -3025,6 +3052,7 @@ static int BusLogic_AbortCommand(struct scsi_cmnd *Command)
 	return SUCCESS;
 }
 
+#endif
 /*
   BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all
   currently executing SCSI Commands as having been Reset.
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 1aaa6569edac..9792e5af5252 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -28,7 +28,6 @@
 #ifndef _BUSLOGIC_H
 #define _BUSLOGIC_H
 
-#include <linux/config.h>
 
 #ifndef PACKED
 #define PACKED __attribute__((packed))
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 8e3d949b7118..7c0068049586 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -15,7 +15,6 @@
 
 */
 
-#include <linux/config.h>
 
 #ifndef CONFIG_SCSI_OMIT_FLASHPOINT
 
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 44728ae3fe77..c4dfcc91ddda 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -27,6 +27,11 @@ config SCSI
 	  However, do not compile this as a module if your root file system
 	  (the one containing the directory /) is located on a SCSI device.
 
+config SCSI_NETLINK
+	bool
+	default n
+	select NET
+
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
@@ -209,7 +214,7 @@ config SCSI_LOGGING
 	  there should be no noticeable performance impact as long as you have
 	  logging turned off.
 
-menu "SCSI Transport Attributes"
+menu "SCSI Transports"
 	depends on SCSI
 
 config SCSI_SPI_ATTRS
@@ -222,6 +227,7 @@ config SCSI_SPI_ATTRS
 config SCSI_FC_ATTRS
 	tristate "FiberChannel Transport Attributes"
 	depends on SCSI
+	select SCSI_NETLINK
 	help
 	  If you wish to export transport-specific information about
 	  each attached FiberChannel device to sysfs, say Y.
@@ -242,6 +248,8 @@ config SCSI_SAS_ATTRS
 	  If you wish to export transport-specific information about
 	  each attached SAS device to sysfs, say Y.
 
+source "drivers/scsi/libsas/Kconfig"
+
 endmenu
 
 menu "SCSI low-level drivers"
@@ -431,6 +439,7 @@ config SCSI_AIC7XXX_OLD
 	  module will be called aic7xxx_old.
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
+source "drivers/scsi/aic94xx/Kconfig"
 
 # All the I2O code and drivers do not seem to be 64bit safe.
 config SCSI_DPT_I2O
@@ -469,68 +478,21 @@ config SCSI_IN2000
 	  To compile this driver as a module, choose M here: the
 	  module will be called in2000.
 
-source "drivers/scsi/megaraid/Kconfig.megaraid"
-
-config SCSI_SATA
-	tristate "Serial ATA (SATA) support"
-	depends on SCSI
-	help
-	  This driver family supports Serial ATA host controllers
-	  and devices.
-
-	  If unsure, say N.
-
-config SCSI_SATA_AHCI
-	tristate "AHCI SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for AHCI Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SVW
-	tristate "ServerWorks Frodo / Apple K2 SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Broadcom/Serverworks/Apple K2
-	  SATA support.
-
-	  If unsure, say N.
-
-config SCSI_ATA_PIIX
-	tristate "Intel PIIX/ICH SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for ICH5 Serial ATA.
-	  If PATA support was enabled previously, this enables
-	  support for select Intel PIIX/ICH PATA host controllers.
-
-	  If unsure, say N.
-
-config SCSI_SATA_MV
-	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for the Marvell Serial ATA family.
-	  Currently supports 88SX[56]0[48][01] chips.
-
-	  If unsure, say N.
-
-config SCSI_SATA_NV
-	tristate "NVIDIA SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+config SCSI_ARCMSR
+	tristate "ARECA ARC11X0[PCI-X]/ARC12X0[PCI-EXPRESS] SATA-RAID support"
+	depends on PCI && SCSI
 	help
-	  This option enables support for NVIDIA Serial ATA.
+	  This driver supports all of ARECA's SATA RAID controller cards.
+	  This is an ARECA-maintained driver by Erich Chen.
+	  If you have any problems, please mail to: < erich@areca.com.tw >
+	  Areca supports Linux RAID config tools.
 
-	  If unsure, say N.
+	  < http://www.areca.com.tw >
 
-config SCSI_PDC_ADMA
-	tristate "Pacific Digital ADMA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Pacific Digital ADMA controllers
+	  To compile this driver as a module, choose M here: the
+	  module will be called arcmsr (modprobe arcmsr).
 
-	  If unsure, say N.
+source "drivers/scsi/megaraid/Kconfig.megaraid"
 
 config SCSI_HPTIOP
 	tristate "HighPoint RocketRAID 3xxx Controller support"
@@ -542,83 +504,6 @@ config SCSI_HPTIOP
 	  To compile this driver as a module, choose M here; the module
 	  will be called hptiop. If unsure, say N.
 
-config SCSI_SATA_QSTOR
-	tristate "Pacific Digital SATA QStor support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Pacific Digital Serial ATA QStor.
-
-	  If unsure, say N.
-
-config SCSI_SATA_PROMISE
-	tristate "Promise SATA TX2/TX4 support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Promise Serial ATA TX2/TX4.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SX4
-	tristate "Promise SATA SX4 support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Promise Serial ATA SX4.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIL
-	tristate "Silicon Image SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Silicon Image Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIL24
-	tristate "Silicon Image 3124/3132 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Silicon Image 3124/3132 Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIS
-	tristate "SiS 964/180 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for SiS Serial ATA 964/180.
-
-	  If unsure, say N.
-
-config SCSI_SATA_ULI
-	tristate "ULi Electronics SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for ULi Electronics SATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_VIA
-	tristate "VIA SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for VIA Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_VITESSE
-	tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_INTEL_COMBINED
-	bool
-	depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
-	default y
-
 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
 	depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API
@@ -1053,6 +938,13 @@ config 53C700_LE_ON_BE
 	depends on SCSI_LASI700
 	default y
 
+config SCSI_STEX
+	tristate "Promise SuperTrak EX Series support"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports Promise SuperTrak EX8350/8300/16350/16300
+	  Storage controllers.
+
 config SCSI_SYM53C8XX_2
 	tristate "SYM53C8XX Version 2 SCSI support"
 	depends on PCI && SCSI
@@ -1169,7 +1061,7 @@ config SCSI_NCR_Q720
 	  you do not have this SCSI card, so say N.
 
 config SCSI_NCR53C8XX_DEFAULT_TAGS
-	int " default tagged command queue depth"
+	int "default tagged command queue depth"
 	depends on SCSI_ZALON || SCSI_NCR_Q720
 	default "8"
 	---help---
@@ -1195,7 +1087,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
 	  There is no safe option other than using good SCSI devices.
 
 config SCSI_NCR53C8XX_MAX_TAGS
-	int " maximum number of queued commands"
+	int "maximum number of queued commands"
 	depends on SCSI_ZALON || SCSI_NCR_Q720
 	default "32"
 	---help---
@@ -1212,7 +1104,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
 	  There is no safe option and the default answer is recommended.
 
 config SCSI_NCR53C8XX_SYNC
-	int " synchronous transfers frequency in MHz"
+	int "synchronous transfers frequency in MHz"
 	depends on SCSI_ZALON || SCSI_NCR_Q720
 	default "20"
 	---help---
@@ -1246,7 +1138,7 @@ config SCSI_NCR53C8XX_SYNC
 	  terminations and SCSI conformant devices.
 
 config SCSI_NCR53C8XX_PROFILE
-	bool " enable profiling"
+	bool "enable profiling"
 	depends on SCSI_ZALON || SCSI_NCR_Q720
 	help
 	  This option allows you to enable profiling information gathering.
@@ -1257,7 +1149,7 @@ config SCSI_NCR53C8XX_PROFILE
 	  The normal answer therefore is N.
 
 config SCSI_NCR53C8XX_NO_DISCONNECT
-	bool " not allow targets to disconnect"
+	bool "not allow targets to disconnect"
 	depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
 	help
 	  This option is only provided for safety if you suspect some SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 84d546323dc7..1ef951be7a5d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o
 obj-$(CONFIG_SCSI_FC_ATTRS) 	+= scsi_transport_fc.o
 obj-$(CONFIG_SCSI_ISCSI_ATTRS)	+= scsi_transport_iscsi.o
 obj-$(CONFIG_SCSI_SAS_ATTRS)	+= scsi_transport_sas.o
+obj-$(CONFIG_SCSI_SAS_LIBSAS)	+= libsas/
 
 obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	iscsi_tcp.o
+obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
 obj-$(CONFIG_SCSI_AMIGA7XX)	+= amiga7xx.o	53c7xx.o
 obj-$(CONFIG_A3000_SCSI)	+= a3000.o	wd33c93.o
 obj-$(CONFIG_A2091_SCSI)	+= a2091.o	wd33c93.o
@@ -58,6 +60,7 @@ obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
 obj-$(CONFIG_SCSI_BUSLOGIC)	+= BusLogic.o
 obj-$(CONFIG_SCSI_DPT_I2O)	+= dpt_i2o.o
 obj-$(CONFIG_SCSI_U14_34F)	+= u14-34f.o
+obj-$(CONFIG_SCSI_ARCMSR)	+= arcmsr/
 obj-$(CONFIG_SCSI_ULTRASTOR)	+= ultrastor.o
 obj-$(CONFIG_SCSI_AHA152X)	+= aha152x.o
 obj-$(CONFIG_SCSI_AHA1542)	+= aha1542.o
@@ -66,6 +69,7 @@ obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/
 obj-$(CONFIG_SCSI_AIC79XX)	+= aic7xxx/
 obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o
+obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FD_MCS)	+= fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
@@ -121,22 +125,8 @@ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
 obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
 obj-$(CONFIG_SCSI_IPR)		+= ipr.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
-obj-$(CONFIG_SCSI_SATA_AHCI)	+= libata.o ahci.o
-obj-$(CONFIG_SCSI_SATA_SVW)	+= libata.o sata_svw.o
-obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
-obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
-obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
-obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
-obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
-obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
-obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
-obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
-obj-$(CONFIG_SCSI_SATA_SX4)	+= libata.o sata_sx4.o
-obj-$(CONFIG_SCSI_SATA_NV)	+= libata.o sata_nv.o
-obj-$(CONFIG_SCSI_SATA_ULI)	+= libata.o sata_uli.o
-obj-$(CONFIG_SCSI_SATA_MV)	+= libata.o sata_mv.o
-obj-$(CONFIG_SCSI_PDC_ADMA)	+= libata.o pdc_adma.o
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
+obj-$(CONFIG_SCSI_STEX)		+= stex.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
@@ -154,6 +144,7 @@ scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
 				   scsicam.o scsi_error.o scsi_lib.o \
 				   scsi_scan.o scsi_sysfs.o \
 				   scsi_devinfo.o
+scsi_mod-$(CONFIG_SCSI_NETLINK)	+= scsi_netlink.o
 scsi_mod-$(CONFIG_SYSCTL)	+= scsi_sysctl.o
 scsi_mod-$(CONFIG_SCSI_PROC_FS)	+= scsi_proc.o
 
@@ -165,7 +156,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs	:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index fa57e0b4a5fd..616810ad17d8 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -500,7 +500,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 /*
  * Function : int should_disconnect (unsigned char cmd)
  *
- * Purpose : decide weather a command would normally disconnect or
+ * Purpose : decide whether a command would normally disconnect or
  *      not, since if it won't disconnect we should go to sleep.
  *
  * Input : cmd - opcode of SCSI command
@@ -585,7 +585,7 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
 	NCR5380_setup(instance);
 
 	for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
-		if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", NULL) == 0))
+		if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
 			trying_irqs |= mask;
 
 	timeout = jiffies + (250 * HZ / 1000);
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index c7dd0154d012..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -23,7 +23,6 @@
 
 #include <linux/module.h>
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/types.h>
@@ -912,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 		sp->SCp.ptr =
 			(char *) virt_to_phys(sp->request_buffer);
 	} else {
-		sp->SCp.buffer = (struct scatterlist *) sp->buffer;
+		sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
 		sp->SCp.buffers_residual = sp->use_sg - 1;
 		sp->SCp.this_residual = sp->SCp.buffer->length;
 		if (esp->dma_mmu_get_scsi_sgl)
@@ -2153,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
  */
 static int esp_should_clear_sync(Scsi_Cmnd *sp)
 {
-	unchar cmd1 = sp->cmnd[0];
-	unchar cmd2 = sp->data_cmnd[0];
+	unchar cmd = sp->cmnd[0];
 
 	/* These cases are for spinning up a disk and
 	 * waiting for that spinup to complete.
 	 */
-	if(cmd1 == START_STOP ||
-	   cmd2 == START_STOP)
+	if(cmd == START_STOP)
 		return 0;
 
-	if(cmd1 == TEST_UNIT_READY ||
-	   cmd2 == TEST_UNIT_READY)
+	if(cmd == TEST_UNIT_READY)
 		return 0;
 
 	/* One more special case for SCSI tape drives,
 	 * this is what is used to probe the device for
 	 * completion of a rewind or tape load operation.
 	 */
-	if(sp->device->type == TYPE_TAPE) {
-		if(cmd1 == MODE_SENSE ||
-		   cmd2 == MODE_SENSE)
-			return 0;
-	}
+	if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
+		return 0;
 
 	return 1;
 }
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
index 65a9b377a410..481653c977cf 100644
--- a/drivers/scsi/NCR53C9x.h
+++ b/drivers/scsi/NCR53C9x.h
@@ -13,7 +13,6 @@
 #ifndef NCR53C9X_H
 #define NCR53C9X_H
 
-#include <linux/config.h>
 #include <linux/interrupt.h>
 
 /* djweis for mac driver */
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 577e63499778..d05681f9d81a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
 MODULE_LICENSE("GPL");
 module_param(NCR_D700, charp, 0);
 
-static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
+static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
 	{ [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
 
 #ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
 	char pad;
 };
 
-static int
+static int __devinit
 NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 		   int slot, u32 region, int differential)
 {
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs)
  * essentially connectecd to the MCA bus independently, it is easier
  * to set them up as two separate host adapters, rather than one
  * adapter with two channels */
-static int
+static int __devinit
 NCR_D700_probe(struct device *dev)
 {
 	struct NCR_D700_private *p;
@@ -320,7 +320,7 @@ NCR_D700_probe(struct device *dev)
 	memset(p, '\0', sizeof(*p));
 	p->dev = dev;
 	snprintf(p->name, sizeof(p->name), "D700(%s)", dev->bus_id);
-	if (request_irq(irq, NCR_D700_intr, SA_SHIRQ, p->name, p)) {
+	if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
 		printk(KERN_ERR "D700: request_irq failed\n");
 		kfree(p);
 		return -EBUSY;
@@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev)
 	for (i = 0; i < 2; i++) {
 		int err;
 
-		if ((err = NCR_D700_probe_one(p, i, slot, irq,
+		if ((err = NCR_D700_probe_one(p, i, irq, slot,
 					      offset_addr + (0x80 * i),
 					      differential)) != 0)
 			printk("D700: SIOP%d: probe failed, error = %d\n",
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
 	return 0;
 }
 
-static void
+static void __devexit
 NCR_D700_remove_one(struct Scsi_Host *host)
 {
 	scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
 	release_region(host->base, 64);
 }
 
-static int
+static int __devexit
 NCR_D700_remove(struct device *dev)
 {
 	struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
 		.name		= "NCR_D700",
 		.bus		= &mca_bus_type,
 		.probe		= NCR_D700_probe,
-		.remove		= NCR_D700_remove,
+		.remove		= __devexit_p(NCR_D700_remove),
 	},
 };
 
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
index 9d18ec90510f..c39ffbb86e39 100644
--- a/drivers/scsi/NCR_Q720.c
+++ b/drivers/scsi/NCR_Q720.c
@@ -265,7 +265,7 @@ NCR_Q720_probe(struct device *dev)
 	p->irq = irq;
 	p->siops = siops;
 
-	if (request_irq(irq, NCR_Q720_intr, SA_SHIRQ, "NCR_Q720", p)) {
+	if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
 		printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
 		goto out_release;
 	}
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 3dce21c78737..d7e9fab54c60 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1120,7 +1120,7 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
 	shost->sg_tablesize = TOTAL_SG_ENTRY;
 
 	/* Initial orc chip */
-	error = request_irq(pdev->irq, inia100_intr, SA_SHIRQ,
+	error = request_irq(pdev->irq, inia100_intr, IRQF_SHARED,
 			"inia100", shost);
 	if (error < 0) {
 		printk(KERN_WARNING "inia100: unable to get irq %d\n",
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 54996eaae979..085406928605 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -40,7 +40,7 @@ static irqreturn_t a2091_intr (int irq, void *_instance, struct pt_regs *fp)
 	return IRQ_HANDLED;
 }
 
-static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
 	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
 	unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -115,7 +115,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
 	return 0;
 }
 
-static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt,
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 		      int status)
 {
 	/* disable SCSI interrupts */
@@ -208,7 +208,7 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
 	regs.SASR = &(DMA(instance)->SASR);
 	regs.SCMD = &(DMA(instance)->SCMD);
 	wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
-	request_irq(IRQ_AMIGA_PORTS, a2091_intr, SA_SHIRQ, "A2091 SCSI",
+	request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
 		    instance);
 	DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
 	num_a2091++;
@@ -217,7 +217,7 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
 	return num_a2091;
 }
 
-static int a2091_bus_reset(Scsi_Cmnd *cmd)
+static int a2091_bus_reset(struct scsi_cmnd *cmd)
 {
 	/* FIXME perform bus-specific reset */
 
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 22d6a13dd8be..fe809bc88d73 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -13,10 +13,6 @@
 
 int a2091_detect(struct scsi_host_template *);
 int a2091_release(struct Scsi_Host *);
-const char *wd33c93_info(void);
-int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int wd33c93_abort(Scsi_Cmnd *);
-int wd33c93_reset(Scsi_Cmnd *, unsigned int);
 
 #ifndef CMD_PER_LUN
 #define CMD_PER_LUN 2
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index f425d424bf08..7bf46d40b561 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -44,7 +44,7 @@ static irqreturn_t a3000_intr (int irq, void *dummy, struct pt_regs *fp)
 	return IRQ_NONE;
 }
 
-static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
 	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
 	unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -110,8 +110,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
 	return 0;
 }
 
-static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt,
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 		      int status)
 {
 	/* disable SCSI interrupts */
 	unsigned short cntr = CNTR_PDMD;
@@ -190,7 +190,7 @@ int __init a3000_detect(struct scsi_host_template *tpnt)
 	regs.SASR = &(DMA(a3000_host)->SASR);
 	regs.SCMD = &(DMA(a3000_host)->SCMD);
 	wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
-	if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, SA_SHIRQ, "A3000 SCSI",
+	if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
 			a3000_intr))
 		goto fail_irq;
 	DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
@@ -205,7 +205,7 @@ fail_register:
 	return 0;
 }
 
-static int a3000_bus_reset(Scsi_Cmnd *cmd)
+static int a3000_bus_reset(struct scsi_cmnd *cmd)
 {
 	/* FIXME perform bus-specific reset */
 
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 5535a65150a4..44a4ec7b4650 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -13,10 +13,6 @@
 
 int a3000_detect(struct scsi_host_template *);
 int a3000_release(struct Scsi_Host *);
-const char *wd33c93_info(void);
-int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int wd33c93_abort(Scsi_Cmnd *);
-int wd33c93_reset(Scsi_Cmnd *, unsigned int);
 
 #ifndef CMD_PER_LUN
 #define CMD_PER_LUN 2
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 83b5c7d085f2..ac108f9e2674 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -169,13 +169,17 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control block
 int acbsize = -1;
 module_param(acbsize, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
+
+int expose_physicals = 0;
+module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on");
 /**
  *	aac_get_config_status	-	check the adapter configuration
  *	@common: adapter to query
  *
  *	Query config status, and commit the configuration if needed.
  */
-int aac_get_config_status(struct aac_dev *dev)
+int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 {
 	int status = 0;
 	struct fib * fibptr;
@@ -219,7 +223,7 @@ int aac_get_config_status(struct aac_dev *dev)
 	aac_fib_complete(fibptr);
 	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
 	if (status >= 0) {
-		if (commit == 1) {
+		if ((commit == 1) || commit_flag) {
 			struct aac_commit_config * dinfo;
 			aac_fib_init(fibptr);
 			dinfo = (struct aac_commit_config *) fib_data(fibptr);
@@ -489,6 +493,8 @@ int aac_probe_container(struct aac_dev *dev, int cid)
 	unsigned instance;
 
 	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -ENOMEM;
 	instance = dev->scsi_host_ptr->unique_id;
 
 	if (!(fibptr = aac_fib_alloc(dev)))
@@ -782,8 +788,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
 	}
 
-	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
-	printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
+	if (!dev->in_reset) {
+		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+		printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
 			dev->name,
 			dev->id,
 			tmp>>24,
@@ -792,20 +799,21 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			le32_to_cpu(dev->adapter_info.kernelbuild),
 			(int)sizeof(dev->supplement_adapter_info.BuildDate),
 			dev->supplement_adapter_info.BuildDate);
 		tmp = le32_to_cpu(dev->adapter_info.monitorrev);
 		printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
 			dev->name, dev->id,
 			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
 			le32_to_cpu(dev->adapter_info.monitorbuild));
 		tmp = le32_to_cpu(dev->adapter_info.biosrev);
 		printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
 			dev->name, dev->id,
 			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
 			le32_to_cpu(dev->adapter_info.biosbuild));
 		if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
 			printk(KERN_INFO "%s%d: serial %x\n",
 			dev->name, dev->id,
 			le32_to_cpu(dev->adapter_info.serial[0]));
+	}
 
 	dev->nondasd_support = 0;
 	dev->raid_scsi_mode = 0;
@@ -1392,6 +1400,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
 	struct scsi_cmnd *cmd;
 	struct scsi_device *sdev = scsicmd->device;
 	int active = 0;
+	struct aac_dev *aac;
 	unsigned long flags;
 
 	/*
@@ -1413,11 +1422,14 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
 	if (active)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 
+	aac = (struct aac_dev *)scsicmd->device->host->hostdata;
+	if (aac->in_reset)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	/*
 	 *	Allocate and initialize a Fib
 	 */
-	if (!(cmd_fibcontext =
-		aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
+	if (!(cmd_fibcontext = aac_fib_alloc(aac)))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
 	aac_fib_init(cmd_fibcontext);
@@ -1470,6 +1482,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
 	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
 
+	if (fsa_dev_ptr == NULL)
+		return -1;
 	/*
 	 *	If the bus, id or lun is out of range, return fail
 	 *	Test does not apply to ID 16, the pseudo id for the controller
@@ -1499,6 +1513,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	case INQUIRY:
 	case READ_CAPACITY:
 	case TEST_UNIT_READY:
+		if (dev->in_reset)
+			return -1;
 		spin_unlock_irq(host->host_lock);
 		aac_probe_container(dev, cid);
 		if ((fsa_dev_ptr[cid].valid & 1) == 0)
@@ -1523,7 +1539,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			return 0;
 		}
 	} else {  /* check for physical non-dasd devices */
-		if(dev->nondasd_support == 1){
+		if ((dev->nondasd_support == 1) || expose_physicals) {
+			if (dev->in_reset)
+				return -1;
 			return aac_send_srb_fib(scsicmd);
 		} else {
 			scsicmd->result = DID_NO_CONNECT << 16;
@@ -1579,6 +1597,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		scsicmd->scsi_done(scsicmd);
 		return 0;
 	}
+	if (dev->in_reset)
+		return -1;
 	setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
 	inq_data.inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
 	aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
@@ -1734,6 +1754,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	case READ_10:
 	case READ_12:
 	case READ_16:
+		if (dev->in_reset)
+			return -1;
 		/*
 		 *	Hack to keep track of ordinal number of the device that
 		 *	corresponds to a container. Needed to convert
@@ -1752,6 +1774,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	case WRITE_10:
 	case WRITE_12:
 	case WRITE_16:
+		if (dev->in_reset)
+			return -1;
 		return aac_write(scsicmd, cid);
 
 	case SYNCHRONIZE_CACHE:
@@ -1782,6 +1806,8 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
 	struct fsa_dev_info *fsa_dev_ptr;
 
 	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -EBUSY;
 	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
 		return -EFAULT;
 	if (qd.cnum == -1)
@@ -1820,6 +1846,8 @@ static int force_delete_disk(struct aac_dev *dev, void __user *arg)
 	struct fsa_dev_info *fsa_dev_ptr;
 
 	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -EBUSY;
 
 	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
 		return -EFAULT;
@@ -1843,6 +1871,8 @@ static int delete_disk(struct aac_dev *dev, void __user *arg)
 	struct fsa_dev_info *fsa_dev_ptr;
 
 	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -EBUSY;
 
 	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
 		return -EFAULT;
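[Editor's note: the aachba.c hunks above repeat one guard pattern: every entry point that would allocate a FIB or dereference fsa_dev first bails out if the container array is gone or a reset is in flight. A condensed sketch of that pattern, with illustrative demo_* naming that is not verbatim driver code:]

static int demo_queue(struct scsi_cmnd *scsicmd)
{
	struct aac_dev *aac =
		(struct aac_dev *)scsicmd->device->host->hostdata;

	if (!aac->fsa_dev)		/* containers torn down */
		return -1;
	if (aac->in_reset)		/* firmware re-initializing */
		return SCSI_MLQUEUE_HOST_BUSY;
	/* ... safe to allocate a FIB and issue the command ... */
	return 0;
}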
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d0eecd4bec83..eb3ed91bac79 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -494,6 +494,7 @@ struct adapter_ops
 	int  (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
 	int  (*adapter_check_health)(struct aac_dev *dev);
 	int  (*adapter_send)(struct fib * fib);
+	int  (*adapter_ioremap)(struct aac_dev * dev, u32 size);
 };
 
 /*
@@ -682,14 +683,6 @@ struct rx_inbound {
 	__le32	Mailbox[8];
 };
 
-#define	InboundMailbox0		IndexRegs.Mailbox[0]
-#define	InboundMailbox1		IndexRegs.Mailbox[1]
-#define	InboundMailbox2		IndexRegs.Mailbox[2]
-#define	InboundMailbox3		IndexRegs.Mailbox[3]
-#define	InboundMailbox4		IndexRegs.Mailbox[4]
-#define	InboundMailbox5		IndexRegs.Mailbox[5]
-#define	InboundMailbox6		IndexRegs.Mailbox[6]
-
 #define	INBOUNDDOORBELL_0	0x00000001
 #define INBOUNDDOORBELL_1	0x00000002
 #define INBOUNDDOORBELL_2	0x00000004
@@ -1010,6 +1003,8 @@ struct aac_dev
 		struct rx_registers __iomem *rx;
 		struct rkt_registers __iomem *rkt;
 	} regs;
+	volatile void __iomem *base;
+	volatile struct rx_inbound __iomem *IndexRegs;
 	u32			OIMR; /* Mask Register Cache */
 	/*
 	 *	AIF thread states
@@ -1029,6 +1024,7 @@ struct aac_dev
 	  init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
 	u8			raw_io_64;
 	u8			printf_enabled;
+	u8			in_reset;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -1049,6 +1045,9 @@ struct aac_dev
 #define aac_adapter_send(fib) \
 	((fib)->dev)->a_ops.adapter_send(fib)
 
+#define aac_adapter_ioremap(dev, size) \
+	(dev)->a_ops.adapter_ioremap(dev, size)
+
 #define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
 
 /*
@@ -1524,7 +1523,6 @@ struct aac_get_name {
 	__le32		count;	/* sizeof(((struct aac_get_name_resp *)NULL)->data) */
 };
 
-#define CT_OK        218
 struct aac_get_name_resp {
 	__le32		dummy0;
 	__le32		dummy1;
@@ -1670,6 +1668,7 @@ extern struct aac_common aac_config;
 #define RCV_TEMP_READINGS		0x00000025
 #define GET_COMM_PREFERRED_SETTINGS	0x00000026
 #define IOP_RESET			0x00001000
+#define IOP_RESET_ALWAYS		0x00001001
 #define RE_INIT_ADAPTER			0x000000ee
 
 /*
@@ -1788,7 +1787,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
 int aac_fib_complete(struct fib * context);
 #define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
 struct aac_dev *aac_init_adapter(struct aac_dev *dev);
-int aac_get_config_status(struct aac_dev *dev);
+int aac_get_config_status(struct aac_dev *dev, int commit_flag);
 int aac_get_containers(struct aac_dev *dev);
 int aac_scsi_cmd(struct scsi_cmnd *cmd);
 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
@@ -1799,6 +1798,7 @@ int aac_sa_init(struct aac_dev *dev);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
 unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
+int aac_check_health(struct aac_dev * dev);
 int aac_command_thread(void *data);
 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
 int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
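[Editor's note: the new adapter_ioremap operation makes register mapping a per-variant method: callers use the aac_adapter_ioremap() macro above, each backend supplies its own routine, and a size of 0 is the unmap request. A sketch of a conforming backend, using a hypothetical demo_ioremap; the rkt version later in this diff has the same shape:]

static int demo_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {		/* size 0 means tear down the mapping */
		iounmap(dev->base);
		return 0;
	}
	dev->base = ioremap(dev->scsi_host_ptr->base, size);
	return dev->base ? 0 : -1;
}

/* wired up once at init time:
 *	dev->a_ops.adapter_ioremap = demo_ioremap;
 * then used as:
 *	aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE);
 */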
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 255421de9d1a..da1d3a9212f8 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -38,7 +38,7 @@
 #include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/blkdev.h>
-#include <linux/delay.h>
+#include <linux/delay.h> /* ssleep prototype */
 #include <linux/kthread.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -140,7 +140,8 @@ cleanup:
 		fibptr->hw_fib_pa = hw_fib_pa;
 		fibptr->hw_fib = hw_fib;
 	}
-	aac_fib_free(fibptr);
+	if (retval != -EINTR)
+		aac_fib_free(fibptr);
 	return retval;
 }
 
@@ -297,7 +298,7 @@ return_fib:
 	spin_unlock_irqrestore(&dev->fib_lock, flags);
 	/* If someone killed the AIF aacraid thread, restart it */
 	status = !dev->aif_thread;
-	if (status && dev->queues && dev->fsa_dev) {
+	if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
 		/* Be paranoid, be very paranoid! */
 		kthread_stop(dev->thread);
 		ssleep(1);
@@ -621,7 +622,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 
 	actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
 	if(actual_fibsize != fibsize){ // User made a mistake - should not continue
-		dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"));
+		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
+		  "Raw SRB command calculated fibsize=%d "
+		  "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d "
+		  "issued fibsize=%d\n",
+		  actual_fibsize, user_srbcmd->sg.count,
+		  sizeof(struct aac_srb), sizeof(struct sgentry),
+		  fibsize));
 		rcode = -EINVAL;
 		goto cleanup;
 	}
@@ -663,6 +670,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		psg->count = cpu_to_le32(sg_indx+1);
 		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
 	}
+	if (status == -EINTR) {
+		rcode = -EINTR;
+		goto cleanup;
+	}
 
 	if (status != 0){
 		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
@@ -696,8 +707,10 @@ cleanup:
 	for(i=0; i <= sg_indx; i++){
 		kfree(sg_list[i]);
 	}
-	aac_fib_complete(srbfib);
-	aac_fib_free(srbfib);
+	if (rcode != -EINTR) {
+		aac_fib_complete(srbfib);
+		aac_fib_free(srbfib);
+	}
 
 	return rcode;
 }
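[Editor's note: the -EINTR special cases above encode an ownership rule: when aac_fib_send() returns -EINTR, the waiter abandoned a FIB that the adapter may still complete, so the ioctl path must not complete or free it; reclamation happens in the interrupt path instead (see the dpcsup.c hunks below). A sketch of the rule, with an illustrative demo_ wrapper that is not part of this patch:]

static int demo_send(struct fib *fibptr)
{
	int status = aac_fib_send(ScsiPortCommand, fibptr,
				  sizeof(struct aac_srb),
				  FsaNormal, 1, 1, NULL, NULL);

	if (status == -EINTR)		/* adapter still owns fibptr; the */
		return -EINTR;		/* response interrupt will free it */
	aac_fib_complete(fibptr);	/* normal path reclaims it here */
	aac_fib_free(fibptr);
	return status;
}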
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 35b0a6ebd3f5..d5cf8b91a0e7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -92,28 +92,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
 	init->AdapterFibsSize = cpu_to_le32(fibsize);
 	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
-	/*
-	 * number of 4k pages of host physical memory. The aacraid fw needs
-	 * this number to be less than 4gb worth of pages. num_physpages is in
-	 * system page units. New firmware doesn't have any issues with the
-	 * mapping system, but older Firmware did, and had *troubles* dealing
-	 * with the math overloading past 32 bits, thus we must limit this
-	 * field.
-	 *
-	 * This assumes the memory is mapped zero->n, which isnt
-	 * always true on real computers. It also has some slight problems
-	 * with the GART on x86-64. I've btw never tried DMA from PCI space
-	 * on this platform but don't be surprised if its problematic.
-	 */
-#ifndef CONFIG_GART_IOMMU
-	if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
-		init->HostPhysMemPages =
-			cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
-	} else
-#endif
-	{
-		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
-	}
+	init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
 
 	init->InitFlags = 0;
 	if (dev->new_comm_interface) {
@@ -201,7 +180,7 @@ int aac_send_shutdown(struct aac_dev * dev)
 			  -2 /* Timeout silently */, 1,
 			  NULL, NULL);
 
-	if (status == 0)
+	if (status >= 0)
 		aac_fib_complete(fibctx);
 	aac_fib_free(fibctx);
 	return status;
@@ -328,17 +307,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		if (status[1] & AAC_OPT_NEW_COMM)
 			dev->new_comm_interface = dev->a_ops.adapter_send != 0;
 		if (dev->new_comm_interface && (status[2] > dev->base_size)) {
-			iounmap(dev->regs.sa);
+			aac_adapter_ioremap(dev, 0);
 			dev->base_size = status[2];
-			dprintk((KERN_DEBUG "ioremap(%lx,%d)\n",
-			  host->base, status[2]));
-			dev->regs.sa = ioremap(host->base, status[2]);
-			if (dev->regs.sa == NULL) {
+			if (aac_adapter_ioremap(dev, status[2])) {
 				/* remap failed, go back ... */
 				dev->new_comm_interface = 0;
-				dev->regs.sa = ioremap(host->base,
-						AAC_MIN_FOOTPRINT_SIZE);
-				if (dev->regs.sa == NULL) {
+				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
 					printk(KERN_WARNING
 					  "aacraid: unable to map adapter.\n");
 					return NULL;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f27419c66af..19e42ac07cb2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -40,8 +40,11 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
 #include <asm/semaphore.h>
 
 #include "aacraid.h"
@@ -464,6 +467,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
 	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
 
+	if (!dev->queues)
+		return -EBUSY;
 	q = &dev->queues->queue[AdapNormCmdQueue];
 
 	if(wait)
@@ -527,8 +532,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 			}
 			udelay(5);
 		}
-	} else
-		down(&fibptr->event_wait);
+	} else if (down_interruptible(&fibptr->event_wait)) {
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+		if (fibptr->done == 0) {
+			fibptr->done = 2; /* Tell interrupt we aborted */
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+			return -EINTR;
+		}
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+	}
 	BUG_ON(fibptr->done == 0);
 
 	if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
@@ -795,7 +807,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 
 	/* Sniff for container changes */
 
-	if (!dev)
+	if (!dev || !dev->fsa_dev)
 		return;
 	container = (u32)-1;
 
@@ -1022,13 +1034,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	if (device) {
 		switch (device_config_needed) {
 		case DELETE:
-			scsi_remove_device(device);
-			break;
 		case CHANGE:
-			if (!dev->fsa_dev[container].valid) {
-				scsi_remove_device(device);
-				break;
-			}
 			scsi_rescan_device(&device->sdev_gendev);
 
 		default:
@@ -1045,6 +1051,262 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 
 }
 
+static int _aac_reset_adapter(struct aac_dev *aac)
+{
+	int index, quirks;
+	u32 ret;
+	int retval;
+	struct Scsi_Host *host;
+	struct scsi_device *dev;
+	struct scsi_cmnd *command;
+	struct scsi_cmnd *command_list;
+
+	/*
+	 * Assumptions:
+	 *	- host is locked.
+	 *	- in_reset is asserted, so no new i/o is getting to the
+	 *	  card.
+	 *	- The card is dead.
+	 */
+	host = aac->scsi_host_ptr;
+	scsi_block_requests(host);
+	aac_adapter_disable_int(aac);
+	spin_unlock_irq(host->host_lock);
+	kthread_stop(aac->thread);
+
+	/*
+	 * If a positive health, means in a known DEAD PANIC
+	 * state and the adapter could be reset to `try again'.
+	 */
+	retval = aac_adapter_check_health(aac);
+	if (retval == 0)
+		retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
+		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
+	if (retval)
+		retval = aac_adapter_sync_cmd(aac, IOP_RESET,
+		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
+
+	if (retval)
+		goto out;
+	if (ret != 0x00000001) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	index = aac->cardtype;
+
+	/*
+	 * Re-initialize the adapter, first free resources, then carefully
+	 * apply the initialization sequence to come back again. Only risk
+	 * is a change in Firmware dropping cache, it is assumed the caller
+	 * will ensure that i/o is queisced and the card is flushed in that
+	 * case.
+	 */
+	aac_fib_map_free(aac);
+	aac->hw_fib_va = NULL;
+	aac->hw_fib_pa = 0;
+	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+	aac->comm_addr = NULL;
+	aac->comm_phys = 0;
+	kfree(aac->queues);
+	aac->queues = NULL;
+	free_irq(aac->pdev->irq, aac);
+	kfree(aac->fsa_dev);
+	aac->fsa_dev = NULL;
+	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
+		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
+		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
+			goto out;
+	} else {
+		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
+		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
+			goto out;
+	}
+	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
+		goto out;
+	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
+		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
+			goto out;
+	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
+	if (IS_ERR(aac->thread)) {
+		retval = PTR_ERR(aac->thread);
+		goto out;
+	}
+	(void)aac_get_adapter_info(aac);
+	quirks = aac_get_driver_ident(index)->quirks;
+	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
+		host->sg_tablesize = 34;
+		host->max_sectors = (host->sg_tablesize * 8) + 112;
+	}
+	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
+		host->sg_tablesize = 17;
+		host->max_sectors = (host->sg_tablesize * 8) + 112;
+	}
+	aac_get_config_status(aac, 1);
+	aac_get_containers(aac);
+	/*
+	 * This is where the assumption that the Adapter is quiesced
+	 * is important.
+	 */
+	command_list = NULL;
+	__shost_for_each_device(dev, host) {
+		unsigned long flags;
+		spin_lock_irqsave(&dev->list_lock, flags);
+		list_for_each_entry(command, &dev->cmd_list, list)
+			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+				command->SCp.buffer = (struct scatterlist *)command_list;
+				command_list = command;
+			}
+		spin_unlock_irqrestore(&dev->list_lock, flags);
+	}
+	while ((command = command_list)) {
+		command_list = (struct scsi_cmnd *)command->SCp.buffer;
+		command->SCp.buffer = NULL;
+		command->result = DID_OK << 16
+		  | COMMAND_COMPLETE << 8
+		  | SAM_STAT_TASK_SET_FULL;
+		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+		command->scsi_done(command);
+	}
+	retval = 0;
+
+out:
+	aac->in_reset = 0;
+	scsi_unblock_requests(host);
+	spin_lock_irq(host->host_lock);
+	return retval;
+}
+
+int aac_check_health(struct aac_dev * aac)
+{
+	int BlinkLED;
+	unsigned long time_now, flagv = 0;
+	struct list_head * entry;
+	struct Scsi_Host * host;
+
+	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
+	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+		return 0;
+
+	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
+		spin_unlock_irqrestore(&aac->fib_lock, flagv);
+		return 0; /* OK */
+	}
+
+	aac->in_reset = 1;
+
+	/* Fake up an AIF:
+	 *	aac_aifcmd.command = AifCmdEventNotify = 1
+	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
+	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
+	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
+	 *	aac.aifcmd.data[2] = AifHighPriority = 3
+	 *	aac.aifcmd.data[3] = BlinkLED
+	 */
+
+	time_now = jiffies/HZ;
+	entry = aac->fib_list.next;
+
+	/*
+	 * For each Context that is on the
+	 * fibctxList, make a copy of the
+	 * fib, and then set the event to wake up the
+	 * thread that is waiting for it.
+	 */
+	while (entry != &aac->fib_list) {
+		/*
+		 * Extract the fibctx
+		 */
+		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
+		struct hw_fib * hw_fib;
+		struct fib * fib;
+		/*
+		 * Check if the queue is getting
+		 * backlogged
+		 */
+		if (fibctx->count > 20) {
+			/*
+			 * It's *not* jiffies folks,
+			 * but jiffies / HZ, so do not
+			 * panic ...
+			 */
+			u32 time_last = fibctx->jiffies;
+			/*
+			 * Has it been > 2 minutes
+			 * since the last read off
+			 * the queue?
+			 */
+			if ((time_now - time_last) > aif_timeout) {
+				entry = entry->next;
+				aac_close_fib_context(aac, fibctx);
+				continue;
+			}
+		}
+		/*
+		 * Warning: no sleep allowed while
+		 * holding spinlock
+		 */
+		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
+		if (fib && hw_fib) {
+			struct aac_aifcmd * aif;
+
+			memset(hw_fib, 0, sizeof(struct hw_fib));
+			memset(fib, 0, sizeof(struct fib));
+			fib->hw_fib = hw_fib;
+			fib->dev = aac;
+			aac_fib_init(fib);
+			fib->type = FSAFS_NTC_FIB_CONTEXT;
+			fib->size = sizeof (struct fib);
+			fib->data = hw_fib->data;
+			aif = (struct aac_aifcmd *)hw_fib->data;
+			aif->command = cpu_to_le32(AifCmdEventNotify);
+			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
+			aif->data[0] = cpu_to_le32(AifEnExpEvent);
+			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
+			aif->data[2] = cpu_to_le32(AifHighPriority);
+			aif->data[3] = cpu_to_le32(BlinkLED);
+
+			/*
+			 * Put the FIB onto the
+			 * fibctx's fibs
+			 */
+			list_add_tail(&fib->fiblink, &fibctx->fib_list);
+			fibctx->count++;
+			/*
+			 * Set the event to wake up the
+			 * thread that will waiting.
+			 */
+			up(&fibctx->wait_sem);
+		} else {
+			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+			kfree(fib);
+			kfree(hw_fib);
+		}
+		entry = entry->next;
+	}
+
+	spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+	if (BlinkLED < 0) {
+		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+		goto out;
+	}
+
+	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+
+	host = aac->scsi_host_ptr;
+	spin_lock_irqsave(host->host_lock, flagv);
+	BlinkLED = _aac_reset_adapter(aac);
+	spin_unlock_irqrestore(host->host_lock, flagv);
+	return BlinkLED;
+
+out:
+	aac->in_reset = 0;
+	return BlinkLED;
+}
+
+
 /**
  *	aac_command_thread	-	command processing thread
  *	@dev: Adapter to monitor
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index b2a5c7262f36..8335f07b7720 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -124,10 +124,15 @@ unsigned int aac_response_normal(struct aac_queue * q)
 		} else {
 			unsigned long flagv;
 			spin_lock_irqsave(&fib->event_lock, flagv);
-			fib->done = 1;
+			if (!fib->done)
+				fib->done = 1;
 			up(&fib->event_wait);
 			spin_unlock_irqrestore(&fib->event_lock, flagv);
 			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+			if (fib->done == 2) {
+				aac_fib_complete(fib);
+				aac_fib_free(fib);
+			}
 		}
 		consumed++;
 		spin_lock_irqsave(q->lock, flags);
@@ -316,7 +321,8 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
 			unsigned long flagv;
 			dprintk((KERN_INFO "event_wait up\n"));
 			spin_lock_irqsave(&fib->event_lock, flagv);
-			fib->done = 1;
+			if (!fib->done)
+				fib->done = 1;
 			up(&fib->event_wait);
 			spin_unlock_irqrestore(&fib->event_lock, flagv);
 			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
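[Editor's note: combined with the commsup.c wait-path change, fib->done now acts as a tiny state machine: 0 = pending, 1 = completed with a waiter present, 2 = the waiter gave up after an interruptible sleep. The response handlers only set done = 1 if it is still 0, and when they observe done == 2 they reclaim the FIB themselves. In outline, with an illustrative demo_ name:]

static void demo_complete(struct fib *fib)
{
	unsigned long flags;

	spin_lock_irqsave(&fib->event_lock, flags);
	if (!fib->done)
		fib->done = 1;		/* waiter still present */
	up(&fib->event_wait);
	spin_unlock_irqrestore(&fib->event_lock, flags);
	if (fib->done == 2) {		/* waiter bailed with -EINTR */
		aac_fib_complete(fib);
		aac_fib_free(fib);
	}
}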
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e42a479ce64a..359e7ddfdb47 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -82,6 +82,8 @@ static LIST_HEAD(aac_devices);
 static int aac_cfg_major = -1;
 char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
 
+extern int expose_physicals;
+
 /*
  * Because of the way Linux names scsi devices, the order in this table has
  * become important.  Check for on-board Raid first, add-in cards second.
@@ -394,6 +396,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
 		sdev->skip_ms_page_3f = 1;
 	}
 	if ((sdev->type == TYPE_DISK) &&
+	    !expose_physicals &&
 	    (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
 		struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
 		if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
@@ -454,17 +457,17 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
 					AAC_DRIVERNAME);
 	aac = (struct aac_dev *)host->hostdata;
-	if (aac_adapter_check_health(aac)) {
-		printk(KERN_ERR "%s: Host adapter appears dead\n",
-				AAC_DRIVERNAME);
-		return -ENODEV;
-	}
+
+	if ((count = aac_check_health(aac)))
+		return count;
 	/*
 	 * Wait for all commands to complete to this specific
 	 * target (block maximum 60 seconds).
 	 */
 	for (count = 60; count; --count) {
-		int active = 0;
+		int active = aac->in_reset;
+
+		if (active == 0)
 		__shost_for_each_device(dev, host) {
 			spin_lock_irqsave(&dev->list_lock, flags);
 			list_for_each_entry(command, &dev->cmd_list, list) {
@@ -864,13 +867,6 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	 *	Map in the registers from the adapter.
 	 */
 	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
-	if ((aac->regs.sa = ioremap(
-	  (unsigned long)aac->scsi_host_ptr->base, AAC_MIN_FOOTPRINT_SIZE))
-	  == NULL) {
-		printk(KERN_WARNING "%s: unable to map adapter.\n",
-		  AAC_DRIVERNAME);
-		goto out_free_fibs;
-	}
 	if ((*aac_drivers[index].init)(aac))
 		goto out_unmap;
 
@@ -928,12 +924,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
 	 * physical channels are address by their actual physical number+1
 	 */
-	if (aac->nondasd_support == 1)
+	if ((aac->nondasd_support == 1) || expose_physicals)
 		shost->max_channel = aac->maximum_num_channels;
 	else
 		shost->max_channel = 0;
 
-	aac_get_config_status(aac);
+	aac_get_config_status(aac, 0);
 	aac_get_containers(aac);
 	list_add(&aac->entry, insert);
 
@@ -969,8 +965,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	aac_fib_map_free(aac);
 	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
 	kfree(aac->queues);
-	iounmap(aac->regs.sa);
- out_free_fibs:
+	aac_adapter_ioremap(aac, 0);
 	kfree(aac->fibs);
 	kfree(aac->fsa_dev);
  out_free_host:
@@ -1005,7 +1000,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
 	kfree(aac->queues);
 
 	free_irq(pdev->irq, aac);
-	iounmap(aac->regs.sa);
+	aac_adapter_ioremap(aac, 0);
 
 	kfree(aac->fibs);
 	kfree(aac->fsa_dev);
@@ -1013,6 +1008,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
 	list_del(&aac->entry);
 	scsi_host_put(shost);
 	pci_disable_device(pdev);
+	if (list_empty(&aac_devices)) {
+		unregister_chrdev(aac_cfg_major, "aac");
+		aac_cfg_major = -1;
+	}
 }
 
 static struct pci_driver aac_pci_driver = {
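[Editor's note: with these linit.c changes the error handler delegates the dead-adapter decision to aac_check_health(), which may itself reset the card via the commsup.c code above; a nonzero return short-circuits the 60-second drain loop. Roughly, using a hypothetical demo_ name and the return conventions of the hunk above:]

static int demo_eh_reset(struct scsi_cmnd *cmd)
{
	struct aac_dev *aac =
		(struct aac_dev *)cmd->device->host->hostdata;
	int count = aac_check_health(aac);	/* may reset the adapter */

	if (count)
		return count;
	/* ... otherwise wait for outstanding commands (up to 60s) ... */
	return SUCCESS;
}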
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 5b52966bbbf3..643f23b5ded8 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -28,370 +28,27 @@
  *
  */
 
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
 #include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <asm/semaphore.h>
 
 #include <scsi/scsi_host.h>
 
 #include "aacraid.h"
 
-static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
-{
-	struct aac_dev *dev = dev_id;
-
-	if (dev->new_comm_interface) {
-		u32 Index = rkt_readl(dev, MUnit.OutboundQueue);
-		if (Index == 0xFFFFFFFFL)
-			Index = rkt_readl(dev, MUnit.OutboundQueue);
-		if (Index != 0xFFFFFFFFL) {
-			do {
-				if (aac_intr_normal(dev, Index)) {
-					rkt_writel(dev, MUnit.OutboundQueue, Index);
-					rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
-				}
-				Index = rkt_readl(dev, MUnit.OutboundQueue);
-			} while (Index != 0xFFFFFFFFL);
-			return IRQ_HANDLED;
-		}
-	} else {
-		unsigned long bellbits;
-		u8 intstat;
-		intstat = rkt_readb(dev, MUnit.OISR);
-		/*
-		 *	Read mask and invert because drawbridge is reversed.
-		 *	This allows us to only service interrupts that have
-		 *	been enabled.
-		 *	Check to see if this is our interrupt.  If it isn't just return
-		 */
-		if (intstat & ~(dev->OIMR))
-		{
-			bellbits = rkt_readl(dev, OutboundDoorbellReg);
-			if (bellbits & DoorBellPrintfReady) {
-				aac_printf(dev, rkt_readl (dev, IndexRegs.Mailbox[5]));
-				rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
-				rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
-			}
-			else if (bellbits & DoorBellAdapterNormCmdReady) {
-				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
-				aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
-//				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
-			}
-			else if (bellbits & DoorBellAdapterNormRespReady) {
-				rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
-				aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
-			}
-			else if (bellbits & DoorBellAdapterNormCmdNotFull) {
-				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-			}
-			else if (bellbits & DoorBellAdapterNormRespNotFull) {
-				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
-			}
-			return IRQ_HANDLED;
-		}
-	}
-	return IRQ_NONE;
-}
-
-/**
- *	aac_rkt_disable_interrupt	-	Disable interrupts
- *	@dev: Adapter
- */
-
-static void aac_rkt_disable_interrupt(struct aac_dev *dev)
-{
-	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
-}
-
 /**
- *	rkt_sync_cmd	-	send a command and wait
- *	@dev: Adapter
- *	@command: Command to execute
- *	@p1: first parameter
- *	@ret: adapter status
+ *	aac_rkt_ioremap
+ *	@size: mapping resize request
  *
- *	This routine will send a synchronous command to the adapter and wait
- *	for its completion.
  */
-
-static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
-	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
-	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
+static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
 {
-	unsigned long start;
-	int ok;
-	/*
-	 *	Write the command into Mailbox 0
-	 */
-	rkt_writel(dev, InboundMailbox0, command);
-	/*
-	 *	Write the parameters into Mailboxes 1 - 6
-	 */
-	rkt_writel(dev, InboundMailbox1, p1);
-	rkt_writel(dev, InboundMailbox2, p2);
-	rkt_writel(dev, InboundMailbox3, p3);
-	rkt_writel(dev, InboundMailbox4, p4);
-	/*
-	 *	Clear the synch command doorbell to start on a clean slate.
-	 */
-	rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
-	/*
-	 *	Disable doorbell interrupts
-	 */
-	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
-	/*
-	 *	Force the completion of the mask register write before issuing
-	 *	the interrupt.
-	 */
-	rkt_readb (dev, MUnit.OIMR);
-	/*
-	 *	Signal that there is a new synch command
-	 */
-	rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
-
-	ok = 0;
-	start = jiffies;
-
-	/*
-	 *	Wait up to 30 seconds
-	 */
-	while (time_before(jiffies, start+30*HZ))
-	{
-		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
-		/*
-		 *	Mon960 will set doorbell0 bit when it has completed the command.
-		 */
-		if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
-			/*
-			 *	Clear the doorbell.
-			 */
-			rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
-			ok = 1;
-			break;
-		}
-		/*
-		 *	Yield the processor in case we are slow
-		 */
-		msleep(1);
+	if (!size) {
+		iounmap(dev->regs.rkt);
+		return 0;
 	}
-	if (ok != 1) {
-		/*
-		 *	Restore interrupt mask even though we timed out
-		 */
+	dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size);
+	if (dev->base == NULL)
-		if (dev->new_comm_interface)
-			rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
-		else
-			rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
-		return -ETIMEDOUT;
-	}
-	/*
-	 *	Pull the synch status from Mailbox 0.
-	 */
-	if (status)
-		*status = rkt_readl(dev, IndexRegs.Mailbox[0]);
-	if (r1)
-		*r1 = rkt_readl(dev, IndexRegs.Mailbox[1]);
-	if (r2)
-		*r2 = rkt_readl(dev, IndexRegs.Mailbox[2]);
-	if (r3)
-		*r3 = rkt_readl(dev, IndexRegs.Mailbox[3]);
-	if (r4)
-		*r4 = rkt_readl(dev, IndexRegs.Mailbox[4]);
-	/*
-	 *	Clear the synch command doorbell.
-	 */
-	rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
-	/*
-	 *	Restore interrupt mask
-	 */
-	if (dev->new_comm_interface)
-		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
-	else
-		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
-	return 0;
-
-}
-
-/**
- *	aac_rkt_interrupt_adapter	-	interrupt adapter
- *	@dev: Adapter
- *
- *	Send an interrupt to the i960 and breakpoint it.
- */
-
-static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
-{
-	rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
-		  NULL, NULL, NULL, NULL, NULL);
-}
-
-/**
- *	aac_rkt_notify_adapter		-	send an event to the adapter
- *	@dev: Adapter
- *	@event: Event to send
243 *
244 * Notify the i960 that something it probably cares about has
245 * happened.
246 */
247
248static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
249{
250 switch (event) {
251
252 case AdapNormCmdQue:
253 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
254 break;
255 case HostNormRespNotFull:
256 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
257 break;
258 case AdapNormRespQue:
259 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
260 break;
261 case HostNormCmdNotFull:
262 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
263 break;
264 case HostShutdown:
265// rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
266// NULL, NULL, NULL, NULL, NULL);
267 break;
268 case FastIo:
269 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
270 break;
271 case AdapPrintfDone:
272 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
273 break;
274 default:
275 BUG();
276 break;
277 }
278}
279
280/**
281 * aac_rkt_start_adapter - activate adapter
282 * @dev: Adapter
283 *
284 * Start up processing on an i960 based AAC adapter
285 */
286
287static void aac_rkt_start_adapter(struct aac_dev *dev)
288{
289 struct aac_init *init;
290
291 init = dev->init;
292 init->HostElapsedSeconds = cpu_to_le32(get_seconds());
293 // We can only use a 32 bit address here
294 rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
295 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
296}
297
298/**
299 * aac_rkt_check_health
300 * @dev: device to check if healthy
301 *
302 * Will attempt to determine if the specified adapter is alive and
303 * capable of handling requests, returning 0 if alive.
304 */
305static int aac_rkt_check_health(struct aac_dev *dev)
306{
307 u32 status = rkt_readl(dev, MUnit.OMRx[0]);
308
309 /*
310 * Check to see if the board failed any self tests.
311 */
312 if (status & SELF_TEST_FAILED)
313 return -1; 50 return -1;
314 /* 51 dev->IndexRegs = &dev->regs.rkt->IndexRegs;
315 * Check to see if the board panic'd.
316 */
317 if (status & KERNEL_PANIC) {
318 char * buffer;
319 struct POSTSTATUS {
320 __le32 Post_Command;
321 __le32 Post_Address;
322 } * post;
323 dma_addr_t paddr, baddr;
324 int ret;
325
326 if ((status & 0xFF000000L) == 0xBC000000L)
327 return (status >> 16) & 0xFF;
328 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
329 ret = -2;
330 if (buffer == NULL)
331 return ret;
332 post = pci_alloc_consistent(dev->pdev,
333 sizeof(struct POSTSTATUS), &paddr);
334 if (post == NULL) {
335 pci_free_consistent(dev->pdev, 512, buffer, baddr);
336 return ret;
337 }
338 memset(buffer, 0, 512);
339 post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
340 post->Post_Address = cpu_to_le32(baddr);
341 rkt_writel(dev, MUnit.IMRx[0], paddr);
342 rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
343 NULL, NULL, NULL, NULL, NULL);
344 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
345 post, paddr);
346 if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
347 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
348 ret <<= 4;
349 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
350 }
351 pci_free_consistent(dev->pdev, 512, buffer, baddr);
352 return ret;
353 }
354 /*
355 * Wait for the adapter to be up and running.
356 */
357 if (!(status & KERNEL_UP_AND_RUNNING))
358 return -3;
359 /*
360 * Everything is OK
361 */
362 return 0;
363}
364
365/**
366 * aac_rkt_send
367 * @fib: fib to issue
368 *
369 * Will send a fib, returning 0 if successful.
370 */
371static int aac_rkt_send(struct fib * fib)
372{
373 u64 addr = fib->hw_fib_pa;
374 struct aac_dev *dev = fib->dev;
375 volatile void __iomem *device = dev->regs.rkt;
376 u32 Index;
377
378 dprintk((KERN_DEBUG "%p->aac_rkt_send(%p->%llx)\n", dev, fib, addr));
379 Index = rkt_readl(dev, MUnit.InboundQueue);
380 if (Index == 0xFFFFFFFFL)
381 Index = rkt_readl(dev, MUnit.InboundQueue);
382 dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
383 if (Index == 0xFFFFFFFFL)
384 return Index;
385 device += Index;
386 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
387 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
388 writel((u32)(addr & 0xffffffff), device);
389 device += sizeof(u32);
390 writel((u32)(addr >> 32), device);
391 device += sizeof(u32);
392 writel(le16_to_cpu(fib->hw_fib->header.Size), device);
393 rkt_writel(dev, MUnit.InboundQueue, Index);
394 dprintk((KERN_DEBUG "aac_rkt_send - return 0\n"));
395 return 0; 52 return 0;
396} 53}
397 54
@@ -406,78 +63,18 @@ static int aac_rkt_send(struct fib * fib)
406 63
407int aac_rkt_init(struct aac_dev *dev) 64int aac_rkt_init(struct aac_dev *dev)
408{ 65{
409 unsigned long start; 66 int retval;
410 unsigned long status; 67 extern int _aac_rx_init(struct aac_dev *dev);
411 int instance; 68 extern void aac_rx_start_adapter(struct aac_dev *dev);
412 const char * name;
413
414 instance = dev->id;
415 name = dev->name;
416 69
417 /* 70 /*
418 * Check to see if the board panic'd while booting.
419 */
420 /*
421 * Check to see if the board failed any self tests.
422 */
423 if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
424 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
425 goto error_iounmap;
426 }
427 /*
428 * Check to see if the monitor panic'd while booting.
429 */
430 if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
431 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
432 goto error_iounmap;
433 }
434 /*
435 * Check to see if the board panic'd while booting.
436 */
437 if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
438 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
439 goto error_iounmap;
440 }
441 start = jiffies;
442 /*
443 * Wait for the adapter to be up and running. Wait up to 3 minutes
444 */
445 while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
446 {
447 if(time_after(jiffies, start+startup_timeout*HZ))
448 {
449 status = rkt_readl(dev, MUnit.OMRx[0]);
450 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
451 dev->name, instance, status);
452 goto error_iounmap;
453 }
454 msleep(1);
455 }
456 if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
457 {
458 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
459 goto error_iounmap;
460 }
461 /*
462 * Fill in the function dispatch table. 71 * Fill in the function dispatch table.
463 */ 72 */
464 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter; 73 dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
465 dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
466 dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
467 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
468 dev->a_ops.adapter_check_health = aac_rkt_check_health;
469 dev->a_ops.adapter_send = aac_rkt_send;
470
471 /*
472 * First clear out all interrupts. Then enable the one's that we
473 * can handle.
474 */
475 rkt_writeb(dev, MUnit.OIMR, 0xff);
476 rkt_writel(dev, MUnit.ODR, 0xffffffff);
477 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
478 74
479 if (aac_init_adapter(dev) == NULL) 75 retval = _aac_rx_init(dev);
480 goto error_irq; 76 if (retval)
77 return retval;
481 if (dev->new_comm_interface) { 78 if (dev->new_comm_interface) {
482 /* 79 /*
483 * FIB Setup has already been done, but we can minimize the 80 * FIB Setup has already been done, but we can minimize the
@@ -494,20 +91,11 @@ int aac_rkt_init(struct aac_dev *dev)
494 dev->init->MaxIoCommands = cpu_to_le32(246); 91 dev->init->MaxIoCommands = cpu_to_le32(246);
495 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB; 92 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
496 } 93 }
497 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
498 } 94 }
499 /* 95 /*
500 * Tell the adapter that all is configured, and it can start 96 * Tell the adapter that all is configured, and it can start
501 * accepting requests 97 * accepting requests
502 */ 98 */
503 aac_rkt_start_adapter(dev); 99 aac_rx_start_adapter(dev);
504 return 0; 100 return 0;
505
506error_irq:
507 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
508 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
509
510error_iounmap:
511
512 return -1;
513} 101}
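
With the transport guts gone, rkt.c keeps only what differs from the Rx path: its own ioremap hook and the 246-command cap for the new comm interface; everything else is delegated to _aac_rx_init(). The a_ops.adapter_ioremap member filled in above is presumably invoked through a dispatch wrapper in aacraid.h, in the style of the driver's other adapter_* hooks — a hedged sketch, not a quote of that header:

	/* assumed aacraid.h wrapper, matching the driver's convention
	 * for the other a_ops members */
	#define aac_adapter_ioremap(dev, size) \
		((dev)->a_ops.adapter_ioremap(dev, size))

This is what lets _aac_rx_init() stay board-agnostic: the shared init path calls aac_adapter_ioremap(dev, dev->base_size) and lands in aac_rkt_ioremap() or aac_rx_ioremap() depending on which front end filled in the table.
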
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 9dadfb28b3f1..a1d214d770eb 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -79,7 +79,7 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
79 { 79 {
80 bellbits = rx_readl(dev, OutboundDoorbellReg); 80 bellbits = rx_readl(dev, OutboundDoorbellReg);
81 if (bellbits & DoorBellPrintfReady) { 81 if (bellbits & DoorBellPrintfReady) {
82 aac_printf(dev, rx_readl (dev, IndexRegs.Mailbox[5])); 82 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); 83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); 84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
85 } 85 }
@@ -134,14 +134,14 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
134 /* 134 /*
135 * Write the command into Mailbox 0 135 * Write the command into Mailbox 0
136 */ 136 */
137 rx_writel(dev, InboundMailbox0, command); 137 writel(command, &dev->IndexRegs->Mailbox[0]);
138 /* 138 /*
139 * Write the parameters into Mailboxes 1 - 6 139 * Write the parameters into Mailboxes 1 - 6
140 */ 140 */
141 rx_writel(dev, InboundMailbox1, p1); 141 writel(p1, &dev->IndexRegs->Mailbox[1]);
142 rx_writel(dev, InboundMailbox2, p2); 142 writel(p2, &dev->IndexRegs->Mailbox[2]);
143 rx_writel(dev, InboundMailbox3, p3); 143 writel(p3, &dev->IndexRegs->Mailbox[3]);
144 rx_writel(dev, InboundMailbox4, p4); 144 writel(p4, &dev->IndexRegs->Mailbox[4]);
145 /* 145 /*
146 * Clear the synch command doorbell to start on a clean slate. 146 * Clear the synch command doorbell to start on a clean slate.
147 */ 147 */
@@ -199,15 +199,15 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
199 * Pull the synch status from Mailbox 0. 199 * Pull the synch status from Mailbox 0.
200 */ 200 */
201 if (status) 201 if (status)
202 *status = rx_readl(dev, IndexRegs.Mailbox[0]); 202 *status = readl(&dev->IndexRegs->Mailbox[0]);
203 if (r1) 203 if (r1)
204 *r1 = rx_readl(dev, IndexRegs.Mailbox[1]); 204 *r1 = readl(&dev->IndexRegs->Mailbox[1]);
205 if (r2) 205 if (r2)
206 *r2 = rx_readl(dev, IndexRegs.Mailbox[2]); 206 *r2 = readl(&dev->IndexRegs->Mailbox[2]);
207 if (r3) 207 if (r3)
208 *r3 = rx_readl(dev, IndexRegs.Mailbox[3]); 208 *r3 = readl(&dev->IndexRegs->Mailbox[3]);
209 if (r4) 209 if (r4)
210 *r4 = rx_readl(dev, IndexRegs.Mailbox[4]); 210 *r4 = readl(&dev->IndexRegs->Mailbox[4]);
211 /* 211 /*
212 * Clear the synch command doorbell. 212 * Clear the synch command doorbell.
213 */ 213 */
@@ -261,8 +261,6 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
261 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3); 261 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
262 break; 262 break;
263 case HostShutdown: 263 case HostShutdown:
264// rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
265// NULL, NULL, NULL, NULL, NULL);
266 break; 264 break;
267 case FastIo: 265 case FastIo:
268 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6); 266 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
@@ -283,7 +281,7 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
283 * Start up processing on an i960 based AAC adapter 281 * Start up processing on an i960 based AAC adapter
284 */ 282 */
285 283
286static void aac_rx_start_adapter(struct aac_dev *dev) 284void aac_rx_start_adapter(struct aac_dev *dev)
287{ 285{
288 struct aac_init *init; 286 struct aac_init *init;
289 287
@@ -381,7 +379,7 @@ static int aac_rx_send(struct fib * fib)
381 dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); 379 dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
382 if (Index == 0xFFFFFFFFL) 380 if (Index == 0xFFFFFFFFL)
383 return Index; 381 return Index;
384 device += Index; 382 device = dev->base + Index;
385 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), 383 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
386 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size))); 384 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
387 writel((u32)(addr & 0xffffffff), device); 385 writel((u32)(addr & 0xffffffff), device);
@@ -395,6 +393,43 @@ static int aac_rx_send(struct fib * fib)
395} 393}
396 394
397/** 395/**
396 * aac_rx_ioremap
397 * @size: mapping resize request
398 *
399 */
400static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
401{
402 if (!size) {
403 iounmap(dev->regs.rx);
404 return 0;
405 }
406 dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
407 if (dev->base == NULL)
408 return -1;
409 dev->IndexRegs = &dev->regs.rx->IndexRegs;
410 return 0;
411}
412
413static int aac_rx_restart_adapter(struct aac_dev *dev)
414{
415 u32 var;
416
417 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n",
418 dev->name, dev->id);
419
420 if (aac_rx_check_health(dev) <= 0)
421 return 1;
422 if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0,
423 &var, NULL, NULL, NULL, NULL))
424 return 1;
425 if (var != 0x00000001)
426 return 1;
427 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
428 return 1;
429 return 0;
430}
431
432/**
398 * aac_rx_init - initialize an i960 based AAC card 433 * aac_rx_init - initialize an i960 based AAC card
399 * @dev: device to configure 434 * @dev: device to configure
400 * 435 *
@@ -403,7 +438,7 @@ static int aac_rx_send(struct fib * fib)
403 * to the comm region. 438 * to the comm region.
404 */ 439 */
405 440
406int aac_rx_init(struct aac_dev *dev) 441int _aac_rx_init(struct aac_dev *dev)
407{ 442{
408 unsigned long start; 443 unsigned long start;
409 unsigned long status; 444 unsigned long status;
@@ -413,27 +448,30 @@ int aac_rx_init(struct aac_dev *dev)
413 instance = dev->id; 448 instance = dev->id;
414 name = dev->name; 449 name = dev->name;
415 450
451 if (aac_adapter_ioremap(dev, dev->base_size)) {
452 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
453 goto error_iounmap;
454 }
455
416 /* 456 /*
417 * Check to see if the board panic'd while booting. 457 * Check to see if the board panic'd while booting.
418 */ 458 */
459 status = rx_readl(dev, MUnit.OMRx[0]);
460 if (status & KERNEL_PANIC)
461 if (aac_rx_restart_adapter(dev))
462 goto error_iounmap;
419 /* 463 /*
420 * Check to see if the board failed any self tests. 464 * Check to see if the board failed any self tests.
421 */ 465 */
422 if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) { 466 status = rx_readl(dev, MUnit.OMRx[0]);
467 if (status & SELF_TEST_FAILED) {
423 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); 468 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
424 goto error_iounmap; 469 goto error_iounmap;
425 } 470 }
426 /* 471 /*
427 * Check to see if the board panic'd while booting.
428 */
429 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
430 printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
431 goto error_iounmap;
432 }
433 /*
434 * Check to see if the monitor panic'd while booting. 472 * Check to see if the monitor panic'd while booting.
435 */ 473 */
436 if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) { 474 if (status & MONITOR_PANIC) {
437 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); 475 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
438 goto error_iounmap; 476 goto error_iounmap;
439 } 477 }
@@ -441,19 +479,17 @@ int aac_rx_init(struct aac_dev *dev)
441 /* 479 /*
442 * Wait for the adapter to be up and running. Wait up to 3 minutes 480 * Wait for the adapter to be up and running. Wait up to 3 minutes
443 */ 481 */
444 while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 482 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
445 || (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
446 { 483 {
447 if(time_after(jiffies, start+startup_timeout*HZ)) 484 if(time_after(jiffies, start+startup_timeout*HZ))
448 { 485 {
449 status = rx_readl(dev, IndexRegs.Mailbox[7]);
450 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 486 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
451 dev->name, instance, status); 487 dev->name, instance, status);
452 goto error_iounmap; 488 goto error_iounmap;
453 } 489 }
454 msleep(1); 490 msleep(1);
455 } 491 }
456 if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0) 492 if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
457 { 493 {
458 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance); 494 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
459 goto error_iounmap; 495 goto error_iounmap;
@@ -481,11 +517,6 @@ int aac_rx_init(struct aac_dev *dev)
481 if (dev->new_comm_interface) 517 if (dev->new_comm_interface)
482 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); 518 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
483 519
484 /*
485 * Tell the adapter that all is configured, and it can start
486 * accepting requests
487 */
488 aac_rx_start_adapter(dev);
489 return 0; 520 return 0;
490 521
491error_irq: 522error_irq:
@@ -496,3 +527,23 @@ error_iounmap:
496 527
497 return -1; 528 return -1;
498} 529}
530
531int aac_rx_init(struct aac_dev *dev)
532{
533 int retval;
534
535 /*
536 * Fill in the function dispatch table.
537 */
538 dev->a_ops.adapter_ioremap = aac_rx_ioremap;
539
540 retval = _aac_rx_init(dev);
541 if (!retval) {
542 /*
543 * Tell the adapter that all is configured, and it can
544 * start accepting requests
545 */
546 aac_rx_start_adapter(dev);
547 }
548 return retval;
549}
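
Two conventions introduced here are worth spelling out: aac_rx_ioremap(dev, 0) doubles as the unmap path, and aac_rx_restart_adapter() only claims success when the IOP_RESET sync command answers 0x00000001 and the KERNEL_PANIC bit has cleared afterwards. A sketch of teardown through the new hook (aac_unmap() is a hypothetical helper, not a function in this driver):

	/* hypothetical helper illustrating the size == 0 convention */
	static void aac_unmap(struct aac_dev *dev)
	{
		if (dev->base) {
			/* dispatches to aac_rx_ioremap(dev, 0) on Rx
			 * boards, which iounmap()s dev->regs.rx */
			aac_adapter_ioremap(dev, 0);
			dev->base = NULL;
		}
	}
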
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 88d400fccc94..f906ead239dd 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -281,6 +281,21 @@ static int aac_sa_check_health(struct aac_dev *dev)
281} 281}
282 282
283/** 283/**
284 * aac_sa_ioremap
285 * @size: mapping resize request
286 *
287 */
288static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
289{
290 if (!size) {
291 iounmap(dev->regs.sa);
292 return 0;
293 }
294 dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size);
295 return (dev->base == NULL) ? -1 : 0;
296}
297
298/**
284 * aac_sa_init - initialize an ARM based AAC card 299 * aac_sa_init - initialize an ARM based AAC card
285 * @dev: device to configure 300 * @dev: device to configure
286 * 301 *
@@ -299,6 +314,11 @@ int aac_sa_init(struct aac_dev *dev)
299 instance = dev->id; 314 instance = dev->id;
300 name = dev->name; 315 name = dev->name;
301 316
317 if (aac_sa_ioremap(dev, dev->base_size)) {
318 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
319 goto error_iounmap;
320 }
321
302 /* 322 /*
303 * Check to see if the board failed any self tests. 323 * Check to see if the board failed any self tests.
304 */ 324 */
@@ -327,7 +347,7 @@ int aac_sa_init(struct aac_dev *dev)
327 msleep(1); 347 msleep(1);
328 } 348 }
329 349
330 if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) { 350 if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) {
331 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance); 351 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
332 goto error_iounmap; 352 goto error_iounmap;
333 } 353 }
@@ -341,6 +361,7 @@ int aac_sa_init(struct aac_dev *dev)
341 dev->a_ops.adapter_notify = aac_sa_notify_adapter; 361 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
342 dev->a_ops.adapter_sync_cmd = sa_sync_cmd; 362 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
343 dev->a_ops.adapter_check_health = aac_sa_check_health; 363 dev->a_ops.adapter_check_health = aac_sa_check_health;
364 dev->a_ops.adapter_ioremap = aac_sa_ioremap;
344 365
345 /* 366 /*
346 * First clear out all interrupts. Then enable the one's that 367 * First clear out all interrupts. Then enable the one's that
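
The request_irq() changes in this and the surrounding files are the mechanical flag rename that landed in 2.6.18: SA_SHIRQ became IRQF_SHARED and SA_INTERRUPT became IRQF_DISABLED, with unchanged semantics. For reference, a minimal sketch (aac_sa_claim_irq() is a hypothetical wrapper, shown only to put the new flags in context):

	#include <linux/interrupt.h>

	/* 2.6.18 rename, old -> new:
	 *   SA_SHIRQ     -> IRQF_SHARED    (IRQ line may be shared)
	 *   SA_INTERRUPT -> IRQF_DISABLED  (handler runs with IRQs off) */
	static int aac_sa_claim_irq(struct aac_dev *dev)
	{
		return request_irq(dev->scsi_host_ptr->irq, aac_sa_intr,
				   IRQF_SHARED | IRQF_DISABLED,
				   "aacraid", dev);
	}
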
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2a419634b256..773f02e3b10b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -371,7 +371,7 @@
371 371
372 1.5 (8/8/96): 372 1.5 (8/8/96):
373 1. Add support for ABP-940U (PCI Ultra) adapter. 373 1. Add support for ABP-940U (PCI Ultra) adapter.
374 2. Add support for IRQ sharing by setting the SA_SHIRQ flag for 374 2. Add support for IRQ sharing by setting the IRQF_SHARED flag for
375 request_irq and supplying a dev_id pointer to both request_irq() 375 request_irq and supplying a dev_id pointer to both request_irq()
376 and free_irq(). 376 and free_irq().
377 3. In AscSearchIOPortAddr11() restore a call to check_region() which 377 3. In AscSearchIOPortAddr11() restore a call to check_region() which
@@ -504,9 +504,9 @@
504 3. For v2.1.93 and newer kernels use CONFIG_PCI and new PCI BIOS 504 3. For v2.1.93 and newer kernels use CONFIG_PCI and new PCI BIOS
505 access functions. 505 access functions.
506 4. Update board serial number printing. 506 4. Update board serial number printing.
507 5. Try allocating an IRQ both with and without the SA_INTERRUPT 507 5. Try allocating an IRQ both with and without the IRQF_DISABLED
508 flag set to allow IRQ sharing with drivers that do not set 508 flag set to allow IRQ sharing with drivers that do not set
509 the SA_INTERRUPT flag. Also display a more descriptive error 509 the IRQF_DISABLED flag. Also display a more descriptive error
510 message if request_irq() fails. 510 message if request_irq() fails.
511 6. Update to latest Asc and Adv Libraries. 511 6. Update to latest Asc and Adv Libraries.
512 512
@@ -754,7 +754,6 @@
754 * --- Linux Include Files 754 * --- Linux Include Files
755 */ 755 */
756 756
757#include <linux/config.h>
758#include <linux/module.h> 757#include <linux/module.h>
759 758
760#if defined(CONFIG_X86) && !defined(CONFIG_ISA) 759#if defined(CONFIG_X86) && !defined(CONFIG_ISA)
@@ -889,10 +888,6 @@ typedef unsigned char uchar;
889#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F) 888#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F)
890#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7) 889#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7)
891#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF)) 890#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF))
892#define ASC_PCI_VENDORID 0x10CD
893#define ASC_PCI_DEVICEID_1200A 0x1100
894#define ASC_PCI_DEVICEID_1200B 0x1200
895#define ASC_PCI_DEVICEID_ULTRA 0x1300
896#define ASC_PCI_REVISION_3150 0x02 891#define ASC_PCI_REVISION_3150 0x02
897#define ASC_PCI_REVISION_3050 0x03 892#define ASC_PCI_REVISION_3050 0x03
898 893
@@ -900,6 +895,14 @@ typedef unsigned char uchar;
900#define ASC_DVCLIB_CALL_FAILED (0) 895#define ASC_DVCLIB_CALL_FAILED (0)
901#define ASC_DVCLIB_CALL_ERROR (-1) 896#define ASC_DVCLIB_CALL_ERROR (-1)
902 897
898#define PCI_VENDOR_ID_ASP 0x10cd
899#define PCI_DEVICE_ID_ASP_1200A 0x1100
900#define PCI_DEVICE_ID_ASP_ABP940 0x1200
901#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
902#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
903#define PCI_DEVICE_ID_38C0800_REV1 0x2500
904#define PCI_DEVICE_ID_38C1600_REV1 0x2700
905
903/* 906/*
904 * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. 907 * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists.
905 * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() 908 * The SRB structure will have to be changed and the ASC_SRB2SCSIQ()
@@ -1493,8 +1496,6 @@ typedef struct asc_dvc_cfg {
1493#define ASC_INIT_STATE_END_INQUIRY 0x0080 1496#define ASC_INIT_STATE_END_INQUIRY 0x0080
1494#define ASC_INIT_RESET_SCSI_DONE 0x0100 1497#define ASC_INIT_RESET_SCSI_DONE 0x0100
1495#define ASC_INIT_STATE_WITHOUT_EEP 0x8000 1498#define ASC_INIT_STATE_WITHOUT_EEP 0x8000
1496#define ASC_PCI_DEVICE_ID_REV_A 0x1100
1497#define ASC_PCI_DEVICE_ID_REV_B 0x1200
1498#define ASC_BUG_FIX_IF_NOT_DWB 0x0001 1499#define ASC_BUG_FIX_IF_NOT_DWB 0x0001
1499#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 1500#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002
1500#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 1501#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
@@ -2101,12 +2102,6 @@ STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
2101#define ADV_NUM_PAGE_CROSSING \ 2102#define ADV_NUM_PAGE_CROSSING \
2102 ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE) 2103 ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE)
2103 2104
2104/* a_condor.h */
2105#define ADV_PCI_VENDOR_ID 0x10CD
2106#define ADV_PCI_DEVICE_ID_REV_A 0x2300
2107#define ADV_PCI_DEVID_38C0800_REV1 0x2500
2108#define ADV_PCI_DEVID_38C1600_REV1 0x2700
2109
2110#define ADV_EEP_DVC_CFG_BEGIN (0x00) 2105#define ADV_EEP_DVC_CFG_BEGIN (0x00)
2111#define ADV_EEP_DVC_CFG_END (0x15) 2106#define ADV_EEP_DVC_CFG_END (0x15)
2112#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ 2107#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */
@@ -3570,14 +3565,7 @@ typedef struct scsi_cmnd REQ, *REQP;
3570#define PCI_MAX_SLOT 0x1F 3565#define PCI_MAX_SLOT 0x1F
3571#define PCI_MAX_BUS 0xFF 3566#define PCI_MAX_BUS 0xFF
3572#define PCI_IOADDRESS_MASK 0xFFFE 3567#define PCI_IOADDRESS_MASK 0xFFFE
3573#define ASC_PCI_VENDORID 0x10CD
3574#define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. */ 3568#define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. */
3575#define ASC_PCI_DEVICE_ID_1100 0x1100
3576#define ASC_PCI_DEVICE_ID_1200 0x1200
3577#define ASC_PCI_DEVICE_ID_1300 0x1300
3578#define ASC_PCI_DEVICE_ID_2300 0x2300 /* ASC-3550 */
3579#define ASC_PCI_DEVICE_ID_2500 0x2500 /* ASC-38C0800 */
3580#define ASC_PCI_DEVICE_ID_2700 0x2700 /* ASC-38C1600 */
3581 3569
3582#ifndef ADVANSYS_STATS 3570#ifndef ADVANSYS_STATS
3583#define ASC_STATS(shp, counter) 3571#define ASC_STATS(shp, counter)
@@ -4331,12 +4319,12 @@ advansys_detect(struct scsi_host_template *tpnt)
4331 struct pci_dev *pci_devp = NULL; 4319 struct pci_dev *pci_devp = NULL;
4332 int pci_device_id_cnt = 0; 4320 int pci_device_id_cnt = 0;
4333 unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = { 4321 unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = {
4334 ASC_PCI_DEVICE_ID_1100, 4322 PCI_DEVICE_ID_ASP_1200A,
4335 ASC_PCI_DEVICE_ID_1200, 4323 PCI_DEVICE_ID_ASP_ABP940,
4336 ASC_PCI_DEVICE_ID_1300, 4324 PCI_DEVICE_ID_ASP_ABP940U,
4337 ASC_PCI_DEVICE_ID_2300, 4325 PCI_DEVICE_ID_ASP_ABP940UW,
4338 ASC_PCI_DEVICE_ID_2500, 4326 PCI_DEVICE_ID_38C0800_REV1,
4339 ASC_PCI_DEVICE_ID_2700 4327 PCI_DEVICE_ID_38C1600_REV1
4340 }; 4328 };
4341 ADV_PADDR pci_memory_address; 4329 ADV_PADDR pci_memory_address;
4342#endif /* CONFIG_PCI */ 4330#endif /* CONFIG_PCI */
@@ -4472,7 +4460,7 @@ advansys_detect(struct scsi_host_template *tpnt)
4472 4460
4473 /* Find all PCI cards. */ 4461 /* Find all PCI cards. */
4474 while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) { 4462 while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) {
4475 if ((pci_devp = pci_find_device(ASC_PCI_VENDORID, 4463 if ((pci_devp = pci_find_device(PCI_VENDOR_ID_ASP,
4476 pci_device_id[pci_device_id_cnt], pci_devp)) == 4464 pci_device_id[pci_device_id_cnt], pci_devp)) ==
4477 NULL) { 4465 NULL) {
4478 pci_device_id_cnt++; 4466 pci_device_id_cnt++;
@@ -4576,9 +4564,9 @@ advansys_detect(struct scsi_host_template *tpnt)
4576 */ 4564 */
4577#ifdef CONFIG_PCI 4565#ifdef CONFIG_PCI
4578 if (asc_bus[bus] == ASC_IS_PCI && 4566 if (asc_bus[bus] == ASC_IS_PCI &&
4579 (pci_devp->device == ASC_PCI_DEVICE_ID_2300 || 4567 (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW ||
4580 pci_devp->device == ASC_PCI_DEVICE_ID_2500 || 4568 pci_devp->device == PCI_DEVICE_ID_38C0800_REV1 ||
4581 pci_devp->device == ASC_PCI_DEVICE_ID_2700)) 4569 pci_devp->device == PCI_DEVICE_ID_38C1600_REV1))
4582 { 4570 {
4583 boardp->flags |= ASC_IS_WIDE_BOARD; 4571 boardp->flags |= ASC_IS_WIDE_BOARD;
4584 } 4572 }
@@ -4601,11 +4589,11 @@ advansys_detect(struct scsi_host_template *tpnt)
4601 adv_dvc_varp->isr_callback = adv_isr_callback; 4589 adv_dvc_varp->isr_callback = adv_isr_callback;
4602 adv_dvc_varp->async_callback = adv_async_callback; 4590 adv_dvc_varp->async_callback = adv_async_callback;
4603#ifdef CONFIG_PCI 4591#ifdef CONFIG_PCI
4604 if (pci_devp->device == ASC_PCI_DEVICE_ID_2300) 4592 if (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW)
4605 { 4593 {
4606 ASC_DBG(1, "advansys_detect: ASC-3550\n"); 4594 ASC_DBG(1, "advansys_detect: ASC-3550\n");
4607 adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; 4595 adv_dvc_varp->chip_type = ADV_CHIP_ASC3550;
4608 } else if (pci_devp->device == ASC_PCI_DEVICE_ID_2500) 4596 } else if (pci_devp->device == PCI_DEVICE_ID_38C0800_REV1)
4609 { 4597 {
4610 ASC_DBG(1, "advansys_detect: ASC-38C0800\n"); 4598 ASC_DBG(1, "advansys_detect: ASC-38C0800\n");
4611 adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; 4599 adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800;
@@ -5203,19 +5191,19 @@ advansys_detect(struct scsi_host_template *tpnt)
5203 /* Register IRQ Number. */ 5191 /* Register IRQ Number. */
5204 ASC_DBG1(2, "advansys_detect: request_irq() %d\n", shp->irq); 5192 ASC_DBG1(2, "advansys_detect: request_irq() %d\n", shp->irq);
5205 /* 5193 /*
5206 * If request_irq() fails with the SA_INTERRUPT flag set, 5194 * If request_irq() fails with the IRQF_DISABLED flag set,
5207 * then try again without the SA_INTERRUPT flag set. This 5195 * then try again without the IRQF_DISABLED flag set. This
5208 * allows IRQ sharing to work even with other drivers that 5196 * allows IRQ sharing to work even with other drivers that
5209 * do not set the SA_INTERRUPT flag. 5197 * do not set the IRQF_DISABLED flag.
5210 * 5198 *
5211 * If SA_INTERRUPT is not set, then interrupts are enabled 5199 * If IRQF_DISABLED is not set, then interrupts are enabled
5212 * before the driver interrupt function is called. 5200 * before the driver interrupt function is called.
5213 */ 5201 */
5214 if (((ret = request_irq(shp->irq, advansys_interrupt, 5202 if (((ret = request_irq(shp->irq, advansys_interrupt,
5215 SA_INTERRUPT | (share_irq == TRUE ? SA_SHIRQ : 0), 5203 IRQF_DISABLED | (share_irq == TRUE ? IRQF_SHARED : 0),
5216 "advansys", boardp)) != 0) && 5204 "advansys", boardp)) != 0) &&
5217 ((ret = request_irq(shp->irq, advansys_interrupt, 5205 ((ret = request_irq(shp->irq, advansys_interrupt,
5218 (share_irq == TRUE ? SA_SHIRQ : 0), 5206 (share_irq == TRUE ? IRQF_SHARED : 0),
5219 "advansys", boardp)) != 0)) 5207 "advansys", boardp)) != 0))
5220 { 5208 {
5221 if (ret == -EBUSY) { 5209 if (ret == -EBUSY) {
@@ -11923,7 +11911,7 @@ AscInitGetConfig(
11923 PCIRevisionID = DvcReadPCIConfigByte(asc_dvc, 11911 PCIRevisionID = DvcReadPCIConfigByte(asc_dvc,
11924 AscPCIConfigRevisionIDRegister); 11912 AscPCIConfigRevisionIDRegister);
11925 11913
11926 if (PCIVendorID != ASC_PCI_VENDORID) { 11914 if (PCIVendorID != PCI_VENDOR_ID_ASP) {
11927 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11915 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11928 } 11916 }
11929 prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc, 11917 prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc,
@@ -11943,15 +11931,15 @@ AscInitGetConfig(
11943 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11931 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11944 } 11932 }
11945 } 11933 }
11946 if ((PCIDeviceID == ASC_PCI_DEVICEID_1200A) || 11934 if ((PCIDeviceID == PCI_DEVICE_ID_ASP_1200A) ||
11947 (PCIDeviceID == ASC_PCI_DEVICEID_1200B)) { 11935 (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940)) {
11948 DvcWritePCIConfigByte(asc_dvc, 11936 DvcWritePCIConfigByte(asc_dvc,
11949 AscPCIConfigLatencyTimer, 0x00); 11937 AscPCIConfigLatencyTimer, 0x00);
11950 if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) 11938 if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer)
11951 != 0x00) { 11939 != 0x00) {
11952 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11940 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11953 } 11941 }
11954 } else if (PCIDeviceID == ASC_PCI_DEVICEID_ULTRA) { 11942 } else if (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940U) {
11955 if (DvcReadPCIConfigByte(asc_dvc, 11943 if (DvcReadPCIConfigByte(asc_dvc,
11956 AscPCIConfigLatencyTimer) < 0x20) { 11944 AscPCIConfigLatencyTimer) < 0x20) {
11957 DvcWritePCIConfigByte(asc_dvc, 11945 DvcWritePCIConfigByte(asc_dvc,
@@ -12038,8 +12026,8 @@ AscInitFromAscDvcVar(
12038 AscSetChipCfgMsw(iop_base, cfg_msw); 12026 AscSetChipCfgMsw(iop_base, cfg_msw);
12039 if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { 12027 if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) {
12040 } else { 12028 } else {
12041 if ((pci_device_id == ASC_PCI_DEVICE_ID_REV_A) || 12029 if ((pci_device_id == PCI_DEVICE_ID_ASP_1200A) ||
12042 (pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) { 12030 (pci_device_id == PCI_DEVICE_ID_ASP_ABP940)) {
12043 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; 12031 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
12044 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; 12032 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
12045 } 12033 }
@@ -12374,7 +12362,7 @@ AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
12374 ASC_PRINT1( 12362 ASC_PRINT1(
12375"AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i); 12363"AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i);
12376 } else { 12364 } else {
12377 ASC_PRINT("AscInitFromEEP: Succesfully re-wrote EEPROM."); 12365 ASC_PRINT("AscInitFromEEP: Successfully re-wrote EEPROM.\n");
12378 } 12366 }
12379 } 12367 }
12380 return (warn_code); 12368 return (warn_code);
@@ -14276,8 +14264,8 @@ Default_38C0800_EEPROM_Config __initdata = {
14276 0, /* 55 reserved */ 14264 0, /* 55 reserved */
14277 0, /* 56 cisptr_lsw */ 14265 0, /* 56 cisptr_lsw */
14278 0, /* 57 cisprt_msw */ 14266 0, /* 57 cisprt_msw */
14279 ADV_PCI_VENDOR_ID, /* 58 subsysvid */ 14267 PCI_VENDOR_ID_ASP, /* 58 subsysvid */
14280 ADV_PCI_DEVID_38C0800_REV1, /* 59 subsysid */ 14268 PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */
14281 0, /* 60 reserved */ 14269 0, /* 60 reserved */
14282 0, /* 61 reserved */ 14270 0, /* 61 reserved */
14283 0, /* 62 reserved */ 14271 0, /* 62 reserved */
@@ -14406,8 +14394,8 @@ Default_38C1600_EEPROM_Config __initdata = {
14406 0, /* 55 reserved */ 14394 0, /* 55 reserved */
14407 0, /* 56 cisptr_lsw */ 14395 0, /* 56 cisptr_lsw */
14408 0, /* 57 cisprt_msw */ 14396 0, /* 57 cisprt_msw */
14409 ADV_PCI_VENDOR_ID, /* 58 subsysvid */ 14397 PCI_VENDOR_ID_ASP, /* 58 subsysvid */
14410 ADV_PCI_DEVID_38C1600_REV1, /* 59 subsysid */ 14398 PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */
14411 0, /* 60 reserved */ 14399 0, /* 60 reserved */
14412 0, /* 61 reserved */ 14400 0, /* 61 reserved */
14413 0, /* 62 reserved */ 14401 0, /* 62 reserved */
@@ -17316,7 +17304,7 @@ AdvWaitEEPCmd(AdvPortAddr iop_base)
17316/* 17304/*
17317 * Write the EEPROM from 'cfg_buf'. 17305 * Write the EEPROM from 'cfg_buf'.
17318 */ 17306 */
17319void 17307void __init
17320AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) 17308AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
17321{ 17309{
17322 ushort *wbuf; 17310 ushort *wbuf;
@@ -17383,7 +17371,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
17383/* 17371/*
17384 * Write the EEPROM from 'cfg_buf'. 17372 * Write the EEPROM from 'cfg_buf'.
17385 */ 17373 */
17386void 17374void __init
17387AdvSet38C0800EEPConfig(AdvPortAddr iop_base, 17375AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
17388 ADVEEP_38C0800_CONFIG *cfg_buf) 17376 ADVEEP_38C0800_CONFIG *cfg_buf)
17389{ 17377{
@@ -17451,7 +17439,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
17451/* 17439/*
17452 * Write the EEPROM from 'cfg_buf'. 17440 * Write the EEPROM from 'cfg_buf'.
17453 */ 17441 */
17454void 17442void __init
17455AdvSet38C1600EEPConfig(AdvPortAddr iop_base, 17443AdvSet38C1600EEPConfig(AdvPortAddr iop_base,
17456 ADVEEP_38C1600_CONFIG *cfg_buf) 17444 ADVEEP_38C1600_CONFIG *cfg_buf)
17457{ 17445{
@@ -18226,3 +18214,22 @@ AdvInquiryHandling(
18226 } 18214 }
18227} 18215}
18228MODULE_LICENSE("Dual BSD/GPL"); 18216MODULE_LICENSE("Dual BSD/GPL");
18217
18218/* PCI Devices supported by this driver */
18219static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
18220 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
18221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18222 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
18223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18224 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U,
18225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18226 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW,
18227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18228 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1,
18229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18230 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1,
18231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18232 { }
18233};
18234MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
18235
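
The new advansys_pci_tbl only feeds module autoloading for now; the detect path above still walks pci_find_device() by hand. Each entry spells out all seven pci_device_id fields explicitly. With the PCI_DEVICE() convenience macro, available in kernels of this era, the same table can be written more compactly (the remaining class/driver_data fields default to zero under static initialization) — a sketch:

	static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
		{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A) },
		{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940) },
		{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U) },
		{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW) },
		{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1) },
		{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
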
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index de80cdfb5b9d..fb6a476eb873 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -253,6 +253,7 @@
253#include <linux/isapnp.h> 253#include <linux/isapnp.h>
254#include <linux/spinlock.h> 254#include <linux/spinlock.h>
255#include <linux/workqueue.h> 255#include <linux/workqueue.h>
256#include <linux/list.h>
256#include <asm/semaphore.h> 257#include <asm/semaphore.h>
257#include <scsi/scsicam.h> 258#include <scsi/scsicam.h>
258 259
@@ -262,6 +263,8 @@
262#include <scsi/scsi_transport_spi.h> 263#include <scsi/scsi_transport_spi.h>
263#include "aha152x.h" 264#include "aha152x.h"
264 265
266static LIST_HEAD(aha152x_host_list);
267
265 268
266/* DEFINES */ 269/* DEFINES */
267 270
@@ -423,8 +426,6 @@ MODULE_DEVICE_TABLE(isapnp, id_table);
423 426
424#endif /* !PCMCIA */ 427#endif /* !PCMCIA */
425 428
426static int registered_count=0;
427static struct Scsi_Host *aha152x_host[2];
428static struct scsi_host_template aha152x_driver_template; 429static struct scsi_host_template aha152x_driver_template;
429 430
430/* 431/*
@@ -541,6 +542,7 @@ struct aha152x_hostdata {
541#ifdef __ISAPNP__ 542#ifdef __ISAPNP__
542 struct pnp_dev *pnpdev; 543 struct pnp_dev *pnpdev;
543#endif 544#endif
545 struct list_head host_list;
544}; 546};
545 547
546 548
@@ -551,6 +553,11 @@ struct aha152x_hostdata {
551struct aha152x_scdata { 553struct aha152x_scdata {
552 Scsi_Cmnd *next; /* next sc in queue */ 554 Scsi_Cmnd *next; /* next sc in queue */
553 struct semaphore *sem; /* semaphore to block on */ 555 struct semaphore *sem; /* semaphore to block on */
556 unsigned char cmd_len;
557 unsigned char cmnd[MAX_COMMAND_SIZE];
558 unsigned short use_sg;
559 unsigned request_bufflen;
560 void *request_buffer;
554}; 561};
555 562
556 563
@@ -750,20 +757,9 @@ static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
750 return ptr; 757 return ptr;
751} 758}
752 759
753static inline struct Scsi_Host *lookup_irq(int irqno)
754{
755 int i;
756
757 for(i=0; i<ARRAY_SIZE(aha152x_host); i++)
758 if(aha152x_host[i] && aha152x_host[i]->irq==irqno)
759 return aha152x_host[i];
760
761 return NULL;
762}
763
764static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs) 760static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
765{ 761{
766 struct Scsi_Host *shpnt = lookup_irq(irqno); 762 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
767 763
768 if (!shpnt) { 764 if (!shpnt) {
769 printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno); 765 printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno);
@@ -786,10 +782,11 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
786 return NULL; 782 return NULL;
787 } 783 }
788 784
789 /* need to have host registered before triggering any interrupt */
790 aha152x_host[registered_count] = shpnt;
791
792 memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt)); 785 memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt));
786 INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list);
787
788 /* need to have host registered before triggering any interrupt */
789 list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);
793 790
794 shpnt->io_port = setup->io_port; 791 shpnt->io_port = setup->io_port;
795 shpnt->n_io_port = IO_RANGE; 792 shpnt->n_io_port = IO_RANGE;
@@ -855,7 +852,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
855 SETPORT(SIMODE0, 0); 852 SETPORT(SIMODE0, 0);
856 SETPORT(SIMODE1, 0); 853 SETPORT(SIMODE1, 0);
857 854
858 if( request_irq(shpnt->irq, swintr, SA_INTERRUPT|SA_SHIRQ, "aha152x", shpnt) ) { 855 if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
859 printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq); 856 printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
860 goto out_host_put; 857 goto out_host_put;
861 } 858 }
@@ -889,7 +886,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
889 SETPORT(SSTAT0, 0x7f); 886 SETPORT(SSTAT0, 0x7f);
890 SETPORT(SSTAT1, 0xef); 887 SETPORT(SSTAT1, 0xef);
891 888
892 if ( request_irq(shpnt->irq, intr, SA_INTERRUPT|SA_SHIRQ, "aha152x", shpnt) ) { 889 if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
893 printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq); 890 printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
894 goto out_host_put; 891 goto out_host_put;
895 } 892 }
@@ -902,12 +899,10 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
902 899
903 scsi_scan_host(shpnt); 900 scsi_scan_host(shpnt);
904 901
905 registered_count++;
906
907 return shpnt; 902 return shpnt;
908 903
909out_host_put: 904out_host_put:
910 aha152x_host[registered_count]=NULL; 905 list_del(&HOSTDATA(shpnt)->host_list);
911 scsi_host_put(shpnt); 906 scsi_host_put(shpnt);
912 907
913 return NULL; 908 return NULL;
@@ -932,6 +927,7 @@ void aha152x_release(struct Scsi_Host *shpnt)
932#endif 927#endif
933 928
934 scsi_remove_host(shpnt); 929 scsi_remove_host(shpnt);
930 list_del(&HOSTDATA(shpnt)->host_list);
935 scsi_host_put(shpnt); 931 scsi_host_put(shpnt);
936} 932}
937 933
@@ -1006,11 +1002,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1006 return FAILED; 1002 return FAILED;
1007 } 1003 }
1008 } else { 1004 } else {
1005 struct aha152x_scdata *sc;
1006
1009 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 1007 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
1010 if(SCpnt->host_scribble==0) { 1008 if(SCpnt->host_scribble==0) {
1011 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 1009 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
1012 return FAILED; 1010 return FAILED;
1013 } 1011 }
1012
1013 sc = SCDATA(SCpnt);
1014 memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
1015 sc->request_buffer = SCpnt->request_buffer;
1016 sc->request_bufflen = SCpnt->request_bufflen;
1017 sc->use_sg = SCpnt->use_sg;
1018 sc->cmd_len = SCpnt->cmd_len;
1014 } 1019 }
1015 1020
1016 SCNEXT(SCpnt) = NULL; 1021 SCNEXT(SCpnt) = NULL;
@@ -1165,6 +1170,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1165 DECLARE_MUTEX_LOCKED(sem); 1170 DECLARE_MUTEX_LOCKED(sem);
1166 struct timer_list timer; 1171 struct timer_list timer;
1167 int ret, issued, disconnected; 1172 int ret, issued, disconnected;
1173 unsigned char old_cmd_len = SCpnt->cmd_len;
1174 unsigned short old_use_sg = SCpnt->use_sg;
1175 void *old_buffer = SCpnt->request_buffer;
1176 unsigned old_bufflen = SCpnt->request_bufflen;
1168 unsigned long flags; 1177 unsigned long flags;
1169 1178
1170#if defined(AHA152X_DEBUG) 1179#if defined(AHA152X_DEBUG)
@@ -1198,11 +1207,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1198 add_timer(&timer); 1207 add_timer(&timer);
1199 down(&sem); 1208 down(&sem);
1200 del_timer(&timer); 1209 del_timer(&timer);
1201 1210
1202 SCpnt->cmd_len = SCpnt->old_cmd_len; 1211 SCpnt->cmd_len = old_cmd_len;
1203 SCpnt->use_sg = SCpnt->old_use_sg; 1212 SCpnt->use_sg = old_use_sg;
1204 SCpnt->request_buffer = SCpnt->buffer; 1213 SCpnt->request_buffer = old_buffer;
1205 SCpnt->request_bufflen = SCpnt->bufflen; 1214 SCpnt->request_bufflen = old_bufflen;
1206 1215
1207 DO_LOCK(flags); 1216 DO_LOCK(flags);
1208 1217
@@ -1441,9 +1450,12 @@ static struct work_struct aha152x_tq;
1441 */ 1450 */
1442static void run(void) 1451static void run(void)
1443{ 1452{
1444 int i; 1453 struct aha152x_hostdata *hd;
1445 for (i = 0; i<ARRAY_SIZE(aha152x_host); i++) { 1454
1446 is_complete(aha152x_host[i]); 1455 list_for_each_entry(hd, &aha152x_host_list, host_list) {
1456 struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
1457
1458 is_complete(shost);
1447 } 1459 }
1448} 1460}
1449 1461
@@ -1453,7 +1465,7 @@ static void run(void)
1453 */ 1465 */
1454static irqreturn_t intr(int irqno, void *dev_id, struct pt_regs *regs) 1466static irqreturn_t intr(int irqno, void *dev_id, struct pt_regs *regs)
1455{ 1467{
1456 struct Scsi_Host *shpnt = lookup_irq(irqno); 1468 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
1457 unsigned long flags; 1469 unsigned long flags;
1458 unsigned char rev, dmacntrl0; 1470 unsigned char rev, dmacntrl0;
1459 1471
@@ -1565,6 +1577,9 @@ static void busfree_run(struct Scsi_Host *shpnt)
1565#endif 1577#endif
1566 1578
1567 if(DONE_SC->SCp.phase & check_condition) { 1579 if(DONE_SC->SCp.phase & check_condition) {
1580 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
1581 struct aha152x_scdata *sc = SCDATA(cmd);
1582
1568#if 0 1583#if 0
1569 if(HOSTDATA(shpnt)->debug & debug_eh) { 1584 if(HOSTDATA(shpnt)->debug & debug_eh) {
1570 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); 1585 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC));
@@ -1573,13 +1588,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
1573#endif 1588#endif
1574 1589
1575 /* restore old command */ 1590 /* restore old command */
1576 memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); 1591 memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
1577 DONE_SC->request_buffer = DONE_SC->buffer; 1592 cmd->request_buffer = sc->request_buffer;
1578 DONE_SC->request_bufflen = DONE_SC->bufflen; 1593 cmd->request_bufflen = sc->request_bufflen;
1579 DONE_SC->use_sg = DONE_SC->old_use_sg; 1594 cmd->use_sg = sc->use_sg;
1580 DONE_SC->cmd_len = DONE_SC->old_cmd_len; 1595 cmd->cmd_len = sc->cmd_len;
1581 1596
1582 DONE_SC->SCp.Status = 0x02; 1597 cmd->SCp.Status = 0x02;
1583 1598
1584 HOSTDATA(shpnt)->commands--; 1599 HOSTDATA(shpnt)->commands--;
1585 if (!HOSTDATA(shpnt)->commands) 1600 if (!HOSTDATA(shpnt)->commands)
@@ -3932,16 +3947,17 @@ static int __init aha152x_init(void)
3932#endif 3947#endif
3933 } 3948 }
3934 3949
3935 return registered_count>0; 3950 return 1;
3936} 3951}
3937 3952
3938static void __exit aha152x_exit(void) 3953static void __exit aha152x_exit(void)
3939{ 3954{
3940 int i; 3955 struct aha152x_hostdata *hd;
3956
3957 list_for_each_entry(hd, &aha152x_host_list, host_list) {
3958 struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
3941 3959
3942 for(i=0; i<ARRAY_SIZE(setup); i++) { 3960 aha152x_release(shost);
3943 aha152x_release(aha152x_host[i]);
3944 aha152x_host[i]=NULL;
3945 } 3961 }
3946} 3962}
3947 3963
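
The array-plus-registered_count bookkeeping is gone: each host now sits on aha152x_host_list via the host_list member embedded in its hostdata, and the interrupt handlers take the host straight from dev_id instead of scanning for a matching IRQ number. The container_of() trick open-coded in run() and aha152x_exit() leans on hostdata[] being the per-driver tail of struct Scsi_Host; a sketch of that mapping as a named helper (host_of() is illustrative, not in the driver):

	#include <linux/list.h>
	#include <scsi/scsi_host.h>

	/* hostdata[] trails struct Scsi_Host, so the aha152x_hostdata
	 * embedded there maps back to its host like this */
	static inline struct Scsi_Host *host_of(struct aha152x_hostdata *hd)
	{
		return container_of((void *)hd, struct Scsi_Host, hostdata);
	}

One caveat in the exit path as written: aha152x_release() list_del()s and frees the very entry list_for_each_entry() is standing on, so list_for_each_entry_safe() would be the more robust iterator there.
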
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 86c6bd234591..24f0f5461792 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -25,7 +25,6 @@
25 * Added proper detection of the AHA-1640 (MCA version of AHA-1540) 25 * Added proper detection of the AHA-1640 (MCA version of AHA-1540)
26 */ 26 */
27 27
28#include <linux/config.h>
29#include <linux/module.h> 28#include <linux/module.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
31#include <linux/kernel.h> 30#include <linux/kernel.h>
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 4b8c6a543925..6b35ed8301e0 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -634,7 +634,7 @@ static int aha1740_probe (struct device *dev)
634 } 634 }
635 635
636 DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level)); 636 DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level));
637 if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 0 : SA_SHIRQ, 637 if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 0 : IRQF_SHARED,
638 "aha1740",shpnt)) { 638 "aha1740",shpnt)) {
639 printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n", 639 printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n",
640 irq_level); 640 irq_level);
@@ -681,6 +681,7 @@ static struct eisa_device_id aha1740_ids[] = {
681 { "ADP0400" }, /* 1744 */ 681 { "ADP0400" }, /* 1744 */
682 { "" } 682 { "" }
683}; 683};
684MODULE_DEVICE_TABLE(eisa, aha1740_ids);
684 685
685static struct eisa_driver aha1740_driver = { 686static struct eisa_driver aha1740_driver = {
686 .id_table = aha1740_ids, 687 .id_table = aha1740_ids,
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
deleted file mode 100644
index 4bb77f62b3b9..000000000000
--- a/drivers/scsi/ahci.c
+++ /dev/null
@@ -1,1472 +0,0 @@
1/*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/dma-mapping.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_cmnd.h>
47#include <linux/libata.h>
48#include <asm/io.h>
49
50#define DRV_NAME "ahci"
51#define DRV_VERSION "1.3"
52
53
54enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
96 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
97 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
98
99 /* registers for each SATA port */
100 PORT_LST_ADDR = 0x00, /* command list DMA addr */
101 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
102 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
103 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
104 PORT_IRQ_STAT = 0x10, /* interrupt status */
105 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
106 PORT_CMD = 0x18, /* port command */
107 PORT_TFDATA = 0x20, /* taskfile data */
108 PORT_SIG = 0x24, /* device TF signature */
109 PORT_CMD_ISSUE = 0x38, /* command issue */
110 PORT_SCR = 0x28, /* SATA phy register block */
111 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
112 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
113 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
114 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
115
116 /* PORT_IRQ_{STAT,MASK} bits */
117 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
118 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
119 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
120 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
121 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
122 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
123 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
124 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
125
126 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
127 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
128 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
129 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
130 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
131 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
132 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
133 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
134 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
135
136 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
137 PORT_IRQ_IF_ERR |
138 PORT_IRQ_CONNECT |
139 PORT_IRQ_PHYRDY |
140 PORT_IRQ_UNK_FIS,
141 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
142 PORT_IRQ_TF_ERR |
143 PORT_IRQ_HBUS_DATA_ERR,
144 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
145 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
146 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
147
148 /* PORT_CMD bits */
149 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
150 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
151 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
152 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
153 PORT_CMD_CLO = (1 << 3), /* Command list override */
154 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
155 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
156 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
157
158 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
159 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
160 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
161
162 /* hpriv->flags bits */
163 AHCI_FLAG_MSI = (1 << 0),
164
165 /* ap->flags bits */
166 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
167 AHCI_FLAG_NO_NCQ = (1 << 25),
168};
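/*
 * For reference, the per-port DMA footprint implied by the constants
 * above (illustrative arithmetic only):
 *
 *	AHCI_CMD_SLOT_SZ      = 32 * 32              = 0x400
 *	AHCI_CMD_TBL_SZ       = 0x80 + (168 * 16)    = 0xb00
 *	AHCI_CMD_TBL_AR_SZ    = 0xb00 * 32           = 0x16000
 *	AHCI_PORT_PRIV_DMA_SZ = 0x400 + 0x16000 + 0x100 = 0x16500
 *
 * i.e. just over 89 KB of coherent DMA memory per port, with each
 * scatter/gather entry taking one 16-byte PRD in the command table.
 */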
169
170struct ahci_cmd_hdr {
171 u32 opts;
172 u32 status;
173 u32 tbl_addr;
174 u32 tbl_addr_hi;
175 u32 reserved[4];
176};
177
178struct ahci_sg {
179 u32 addr;
180 u32 addr_hi;
181 u32 reserved;
182 u32 flags_size;
183};
184
185struct ahci_host_priv {
186 unsigned long flags;
187 u32 cap; /* cache of HOST_CAP register */
188 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
189};
190
191struct ahci_port_priv {
192 struct ahci_cmd_hdr *cmd_slot;
193 dma_addr_t cmd_slot_dma;
194 void *cmd_tbl;
195 dma_addr_t cmd_tbl_dma;
196 void *rx_fis;
197 dma_addr_t rx_fis_dma;
198};
199
200static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
201static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
202static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
203static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
204static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
205static void ahci_irq_clear(struct ata_port *ap);
206static int ahci_port_start(struct ata_port *ap);
207static void ahci_port_stop(struct ata_port *ap);
208static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
209static void ahci_qc_prep(struct ata_queued_cmd *qc);
210static u8 ahci_check_status(struct ata_port *ap);
211static void ahci_freeze(struct ata_port *ap);
212static void ahci_thaw(struct ata_port *ap);
213static void ahci_error_handler(struct ata_port *ap);
214static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
215static void ahci_remove_one (struct pci_dev *pdev);
216
217static struct scsi_host_template ahci_sht = {
218 .module = THIS_MODULE,
219 .name = DRV_NAME,
220 .ioctl = ata_scsi_ioctl,
221 .queuecommand = ata_scsi_queuecmd,
222 .change_queue_depth = ata_scsi_change_queue_depth,
223 .can_queue = AHCI_MAX_CMDS - 1,
224 .this_id = ATA_SHT_THIS_ID,
225 .sg_tablesize = AHCI_MAX_SG,
226 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
227 .emulated = ATA_SHT_EMULATED,
228 .use_clustering = AHCI_USE_CLUSTERING,
229 .proc_name = DRV_NAME,
230 .dma_boundary = AHCI_DMA_BOUNDARY,
231 .slave_configure = ata_scsi_slave_config,
232 .slave_destroy = ata_scsi_slave_destroy,
233 .bios_param = ata_std_bios_param,
234};
235
236static const struct ata_port_operations ahci_ops = {
237 .port_disable = ata_port_disable,
238
239 .check_status = ahci_check_status,
240 .check_altstatus = ahci_check_status,
241 .dev_select = ata_noop_dev_select,
242
243 .tf_read = ahci_tf_read,
244
245 .qc_prep = ahci_qc_prep,
246 .qc_issue = ahci_qc_issue,
247
248 .irq_handler = ahci_interrupt,
249 .irq_clear = ahci_irq_clear,
250
251 .scr_read = ahci_scr_read,
252 .scr_write = ahci_scr_write,
253
254 .freeze = ahci_freeze,
255 .thaw = ahci_thaw,
256
257 .error_handler = ahci_error_handler,
258 .post_internal_cmd = ahci_post_internal_cmd,
259
260 .port_start = ahci_port_start,
261 .port_stop = ahci_port_stop,
262};
263
264static const struct ata_port_info ahci_port_info[] = {
265 /* board_ahci */
266 {
267 .sht = &ahci_sht,
268 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
269 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
270 ATA_FLAG_SKIP_D2H_BSY,
271 .pio_mask = 0x1f, /* pio0-4 */
272 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
273 .port_ops = &ahci_ops,
274 },
275 /* board_ahci_vt8251 */
276 {
277 .sht = &ahci_sht,
278 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
279 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
280 ATA_FLAG_SKIP_D2H_BSY |
281 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
282 .pio_mask = 0x1f, /* pio0-4 */
283 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
284 .port_ops = &ahci_ops,
285 },
286};
287
288static const struct pci_device_id ahci_pci_tbl[] = {
289 /* Intel */
290 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
291 board_ahci }, /* ICH6 */
292 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
293 board_ahci }, /* ICH6M */
294 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
295 board_ahci }, /* ICH7 */
296 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
297 board_ahci }, /* ICH7M */
298 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
299 board_ahci }, /* ICH7R */
300 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
301 board_ahci }, /* ULi M5288 */
302 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
303 board_ahci }, /* ESB2 */
304 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
305 board_ahci }, /* ESB2 */
306 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 board_ahci }, /* ESB2 */
308 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
309 board_ahci }, /* ICH7-M DH */
310 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
311 board_ahci }, /* ICH8 */
312 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
313 board_ahci }, /* ICH8 */
314 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
315 board_ahci }, /* ICH8 */
316 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
317 board_ahci }, /* ICH8M */
318 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ICH8M */
320
321 /* JMicron */
322 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* JMicron JMB360 */
324 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* JMicron JMB361 */
326 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* JMicron JMB363 */
328 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* JMicron JMB365 */
330 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* JMicron JMB366 */
332
333 /* ATI */
334 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
335 board_ahci }, /* ATI SB600 non-raid */
336 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
337 board_ahci }, /* ATI SB600 raid */
338
339 /* VIA */
340 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci_vt8251 }, /* VIA VT8251 */
342
343 /* NVIDIA */
344 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
345 board_ahci }, /* MCP65 */
346 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
347 board_ahci }, /* MCP65 */
348 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* MCP65 */
350 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
351 board_ahci }, /* MCP65 */
352
353 { } /* terminate list */
354};
355
356
357static struct pci_driver ahci_pci_driver = {
358 .name = DRV_NAME,
359 .id_table = ahci_pci_tbl,
360 .probe = ahci_init_one,
361 .remove = ahci_remove_one,
362};
363
364
365static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
366{
367 return base + 0x100 + (port * 0x80);
368}
369
370static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
371{
372 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
373}
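/*
 * This mirrors the AHCI register map: port register banks follow the
 * 0x100-byte global block at 0x80-byte strides.  A hypothetical lookup
 * for port 2:
 *
 *	void __iomem *p2 = ahci_port_base(mmio, 2);	=> mmio + 0x200
 *	u32 ssts = readl(p2 + PORT_SCR_STAT);
 */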
374
375static int ahci_port_start(struct ata_port *ap)
376{
377 struct device *dev = ap->host_set->dev;
378 struct ahci_host_priv *hpriv = ap->host_set->private_data;
379 struct ahci_port_priv *pp;
380 void __iomem *mmio = ap->host_set->mmio_base;
381 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
382 void *mem;
383 dma_addr_t mem_dma;
384 int rc;
385
386 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
387 if (!pp)
388 return -ENOMEM;
389 memset(pp, 0, sizeof(*pp));
390
391 rc = ata_pad_alloc(ap, dev);
392 if (rc) {
393 kfree(pp);
394 return rc;
395 }
396
397 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
398 if (!mem) {
399 ata_pad_free(ap, dev);
400 kfree(pp);
401 return -ENOMEM;
402 }
403 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
404
405 /*
 406 * First item in chunk of DMA memory: the 32-slot command list,
 407 * one 32-byte command header per slot
408 */
409 pp->cmd_slot = mem;
410 pp->cmd_slot_dma = mem_dma;
411
412 mem += AHCI_CMD_SLOT_SZ;
413 mem_dma += AHCI_CMD_SLOT_SZ;
414
415 /*
416 * Second item: Received-FIS area
417 */
418 pp->rx_fis = mem;
419 pp->rx_fis_dma = mem_dma;
420
421 mem += AHCI_RX_FIS_SZ;
422 mem_dma += AHCI_RX_FIS_SZ;
423
424 /*
425 * Third item: data area for storing a single command
426 * and its scatter-gather table
427 */
428 pp->cmd_tbl = mem;
429 pp->cmd_tbl_dma = mem_dma;
430
431 ap->private_data = pp;
432
433 if (hpriv->cap & HOST_CAP_64)
434 writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
435 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
436 readl(port_mmio + PORT_LST_ADDR); /* flush */
437
438 if (hpriv->cap & HOST_CAP_64)
439 writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
440 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
441 readl(port_mmio + PORT_FIS_ADDR); /* flush */
442
443 writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |
444 PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP |
445 PORT_CMD_START, port_mmio + PORT_CMD);
446 readl(port_mmio + PORT_CMD); /* flush */
447
448 return 0;
449}
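/*
 * Resulting layout of the coherent DMA block carved up above (offsets
 * for illustration; the page-aligned allocation also satisfies the
 * 1 KB command-list and 256-byte FIS alignment rules of the spec):
 *
 *	0x0000	cmd_slot	32 command headers, 32 bytes each
 *	0x0400	rx_fis		received-FIS area, 256 bytes
 *	0x0500	cmd_tbl		32 command tables, 0xb00 bytes each
 */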
450
451
452static void ahci_port_stop(struct ata_port *ap)
453{
454 struct device *dev = ap->host_set->dev;
455 struct ahci_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host_set->mmio_base;
457 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
458 u32 tmp;
459
460 tmp = readl(port_mmio + PORT_CMD);
461 tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
462 writel(tmp, port_mmio + PORT_CMD);
463 readl(port_mmio + PORT_CMD); /* flush */
464
 465 /* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit to
 466 * clear, so a single 500 msec wait for both is slightly optimistic.
467 */
468 msleep(500);
469
470 ap->private_data = NULL;
471 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
472 pp->cmd_slot, pp->cmd_slot_dma);
473 ata_pad_free(ap, dev);
474 kfree(pp);
475}
476
477static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
478{
479 unsigned int sc_reg;
480
481 switch (sc_reg_in) {
482 case SCR_STATUS: sc_reg = 0; break;
483 case SCR_CONTROL: sc_reg = 1; break;
484 case SCR_ERROR: sc_reg = 2; break;
485 case SCR_ACTIVE: sc_reg = 3; break;
486 default:
487 return 0xffffffffU;
488 }
489
490 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
491}
492
493
494static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
495 u32 val)
496{
497 unsigned int sc_reg;
498
499 switch (sc_reg_in) {
500 case SCR_STATUS: sc_reg = 0; break;
501 case SCR_CONTROL: sc_reg = 1; break;
502 case SCR_ERROR: sc_reg = 2; break;
503 case SCR_ACTIVE: sc_reg = 3; break;
504 default:
505 return;
506 }
507
508 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
509}
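/*
 * The sc_reg index maps directly onto the SCR block at PORT_SCR (0x28):
 * SStatus 0x28, SControl 0x2c, SError 0x30, SActive 0x34.  As a sketch,
 * a link-up test could read:
 *
 *	(ahci_scr_read(ap, SCR_STATUS) & 0xf) == 0x3
 *
 * where DET == 0x3 means device present and phy communication up.
 */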
510
511static int ahci_stop_engine(struct ata_port *ap)
512{
513 void __iomem *mmio = ap->host_set->mmio_base;
514 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
515 int work;
516 u32 tmp;
517
518 tmp = readl(port_mmio + PORT_CMD);
519 tmp &= ~PORT_CMD_START;
520 writel(tmp, port_mmio + PORT_CMD);
521
522 /* wait for engine to stop. TODO: this could be
523 * as long as 500 msec
524 */
525 work = 1000;
526 while (work-- > 0) {
527 tmp = readl(port_mmio + PORT_CMD);
528 if ((tmp & PORT_CMD_LIST_ON) == 0)
529 return 0;
530 udelay(10);
531 }
532
533 return -EIO;
534}
535
536static void ahci_start_engine(struct ata_port *ap)
537{
538 void __iomem *mmio = ap->host_set->mmio_base;
539 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
540 u32 tmp;
541
542 tmp = readl(port_mmio + PORT_CMD);
543 tmp |= PORT_CMD_START;
544 writel(tmp, port_mmio + PORT_CMD);
545 readl(port_mmio + PORT_CMD); /* flush */
546}
547
548static unsigned int ahci_dev_classify(struct ata_port *ap)
549{
550 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
551 struct ata_taskfile tf;
552 u32 tmp;
553
554 tmp = readl(port_mmio + PORT_SIG);
555 tf.lbah = (tmp >> 24) & 0xff;
556 tf.lbam = (tmp >> 16) & 0xff;
557 tf.lbal = (tmp >> 8) & 0xff;
558 tf.nsect = (tmp) & 0xff;
559
560 return ata_dev_classify(&tf);
561}
562
563static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
564 u32 opts)
565{
566 dma_addr_t cmd_tbl_dma;
567
568 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
569
570 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
571 pp->cmd_slot[tag].status = 0;
572 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
573 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
574}
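/*
 * The "(x >> 16) >> 16" idiom splits the 64-bit bus address without
 * invoking undefined behaviour: when dma_addr_t is only 32 bits wide,
 * a single shift by 32 is undefined in C, while two 16-bit shifts
 * safely yield zero for the high dword.
 */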
575
576static int ahci_clo(struct ata_port *ap)
577{
578 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
579 struct ahci_host_priv *hpriv = ap->host_set->private_data;
580 u32 tmp;
581
582 if (!(hpriv->cap & HOST_CAP_CLO))
583 return -EOPNOTSUPP;
584
585 tmp = readl(port_mmio + PORT_CMD);
586 tmp |= PORT_CMD_CLO;
587 writel(tmp, port_mmio + PORT_CMD);
588
589 tmp = ata_wait_register(port_mmio + PORT_CMD,
590 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
591 if (tmp & PORT_CMD_CLO)
592 return -EIO;
593
594 return 0;
595}
596
597static int ahci_prereset(struct ata_port *ap)
598{
599 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
600 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
601 /* ATA_BUSY hasn't cleared, so send a CLO */
602 ahci_clo(ap);
603 }
604
605 return ata_std_prereset(ap);
606}
607
608static int ahci_softreset(struct ata_port *ap, unsigned int *class)
609{
610 struct ahci_port_priv *pp = ap->private_data;
611 void __iomem *mmio = ap->host_set->mmio_base;
612 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
613 const u32 cmd_fis_len = 5; /* five dwords */
614 const char *reason = NULL;
615 struct ata_taskfile tf;
616 u32 tmp;
617 u8 *fis;
618 int rc;
619
620 DPRINTK("ENTER\n");
621
622 if (ata_port_offline(ap)) {
623 DPRINTK("PHY reports no device\n");
624 *class = ATA_DEV_NONE;
625 return 0;
626 }
627
628 /* prepare for SRST (AHCI-1.1 10.4.1) */
629 rc = ahci_stop_engine(ap);
630 if (rc) {
631 reason = "failed to stop engine";
632 goto fail_restart;
633 }
634
635 /* check BUSY/DRQ, perform Command List Override if necessary */
636 ahci_tf_read(ap, &tf);
637 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
638 rc = ahci_clo(ap);
639
640 if (rc == -EOPNOTSUPP) {
641 reason = "port busy but CLO unavailable";
642 goto fail_restart;
643 } else if (rc) {
644 reason = "port busy but CLO failed";
645 goto fail_restart;
646 }
647 }
648
649 /* restart engine */
650 ahci_start_engine(ap);
651
652 ata_tf_init(ap->device, &tf);
653 fis = pp->cmd_tbl;
654
655 /* issue the first D2H Register FIS */
656 ahci_fill_cmd_slot(pp, 0,
657 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
658
659 tf.ctl |= ATA_SRST;
660 ata_tf_to_fis(&tf, fis, 0);
661 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
662
663 writel(1, port_mmio + PORT_CMD_ISSUE);
664
665 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
666 if (tmp & 0x1) {
667 rc = -EIO;
668 reason = "1st FIS failed";
669 goto fail;
670 }
671
672 /* spec says at least 5us, but be generous and sleep for 1ms */
673 msleep(1);
674
675 /* issue the second D2H Register FIS */
676 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
677
678 tf.ctl &= ~ATA_SRST;
679 ata_tf_to_fis(&tf, fis, 0);
680 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
681
682 writel(1, port_mmio + PORT_CMD_ISSUE);
683 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
684
685 /* spec mandates ">= 2ms" before checking status.
686 * We wait 150ms, because that was the magic delay used for
687 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
688 * between when the ATA command register is written, and then
689 * status is checked. Because waiting for "a while" before
690 * checking status is fine, post SRST, we perform this magic
691 * delay here as well.
692 */
693 msleep(150);
694
695 *class = ATA_DEV_NONE;
696 if (ata_port_online(ap)) {
697 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
698 rc = -EIO;
699 reason = "device not ready";
700 goto fail;
701 }
702 *class = ahci_dev_classify(ap);
703 }
704
705 DPRINTK("EXIT, class=%u\n", *class);
706 return 0;
707
708 fail_restart:
709 ahci_start_engine(ap);
710 fail:
711 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
712 return rc;
713}
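/*
 * In short, the SRST dance above (AHCI 1.1 section 10.4.1): stop the
 * command engine, clear BSY/DRQ (via CLO where supported), restart the
 * engine, then send two H2D Register FISes from command slot 0: the
 * first with SRST set, the second with SRST clear.  Clearing bit 7 of
 * FIS byte 1 (the C bit) marks each as a Device Control update rather
 * than a new command.
 */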
714
715static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
716{
717 struct ahci_port_priv *pp = ap->private_data;
718 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
719 struct ata_taskfile tf;
720 int rc;
721
722 DPRINTK("ENTER\n");
723
724 ahci_stop_engine(ap);
725
726 /* clear D2H reception area to properly wait for D2H FIS */
727 ata_tf_init(ap->device, &tf);
728 tf.command = 0xff;
729 ata_tf_to_fis(&tf, d2h_fis, 0);
730
731 rc = sata_std_hardreset(ap, class);
732
733 ahci_start_engine(ap);
734
735 if (rc == 0 && ata_port_online(ap))
736 *class = ahci_dev_classify(ap);
737 if (*class == ATA_DEV_UNKNOWN)
738 *class = ATA_DEV_NONE;
739
740 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
741 return rc;
742}
743
744static void ahci_postreset(struct ata_port *ap, unsigned int *class)
745{
746 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
747 u32 new_tmp, tmp;
748
749 ata_std_postreset(ap, class);
750
751 /* Make sure port's ATAPI bit is set appropriately */
752 new_tmp = tmp = readl(port_mmio + PORT_CMD);
753 if (*class == ATA_DEV_ATAPI)
754 new_tmp |= PORT_CMD_ATAPI;
755 else
756 new_tmp &= ~PORT_CMD_ATAPI;
757 if (new_tmp != tmp) {
758 writel(new_tmp, port_mmio + PORT_CMD);
759 readl(port_mmio + PORT_CMD); /* flush */
760 }
761}
762
763static u8 ahci_check_status(struct ata_port *ap)
764{
765 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
766
767 return readl(mmio + PORT_TFDATA) & 0xFF;
768}
769
770static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
771{
772 struct ahci_port_priv *pp = ap->private_data;
773 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
774
775 ata_tf_from_fis(d2h_fis, tf);
776}
777
778static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
779{
780 struct scatterlist *sg;
781 struct ahci_sg *ahci_sg;
782 unsigned int n_sg = 0;
783
784 VPRINTK("ENTER\n");
785
786 /*
787 * Next, the S/G list.
788 */
789 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
790 ata_for_each_sg(sg, qc) {
791 dma_addr_t addr = sg_dma_address(sg);
792 u32 sg_len = sg_dma_len(sg);
793
794 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
795 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
796 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
797
798 ahci_sg++;
799 n_sg++;
800 }
801
802 return n_sg;
803}
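/*
 * Each ahci_sg above is one 16-byte PRD whose low bits of flags_size
 * hold the byte count minus one.  Worked example: an 8 KB segment at
 * bus address 0x123457000 would be encoded roughly as
 *
 *	addr       = 0x23457000
 *	addr_hi    = 0x00000001
 *	flags_size = 0x00001fff		(8192 - 1)
 */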
804
805static void ahci_qc_prep(struct ata_queued_cmd *qc)
806{
807 struct ata_port *ap = qc->ap;
808 struct ahci_port_priv *pp = ap->private_data;
809 int is_atapi = is_atapi_taskfile(&qc->tf);
810 void *cmd_tbl;
811 u32 opts;
812 const u32 cmd_fis_len = 5; /* five dwords */
813 unsigned int n_elem;
814
815 /*
816 * Fill in command table information. First, the header,
817 * a SATA Register - Host to Device command FIS.
818 */
819 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
820
821 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
822 if (is_atapi) {
823 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
824 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
825 }
826
827 n_elem = 0;
828 if (qc->flags & ATA_QCFLAG_DMAMAP)
829 n_elem = ahci_fill_sg(qc, cmd_tbl);
830
831 /*
832 * Fill in command slot information.
833 */
834 opts = cmd_fis_len | n_elem << 16;
835 if (qc->tf.flags & ATA_TFLAG_WRITE)
836 opts |= AHCI_CMD_WRITE;
837 if (is_atapi)
838 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
839
840 ahci_fill_cmd_slot(pp, qc->tag, opts);
841}
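/*
 * Sketch of the resulting command-header opts dword: bits 4:0 hold the
 * command FIS length in dwords (5 for an H2D Register FIS), bit 5
 * ATAPI, bit 6 write, bit 7 prefetch, bits 31:16 the PRDT entry count.
 * A single-segment DMA read would thus produce opts == 0x00010005.
 */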
842
843static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
844{
845 struct ahci_port_priv *pp = ap->private_data;
846 struct ata_eh_info *ehi = &ap->eh_info;
847 unsigned int err_mask = 0, action = 0;
848 struct ata_queued_cmd *qc;
849 u32 serror;
850
851 ata_ehi_clear_desc(ehi);
852
853 /* AHCI needs SError cleared; otherwise, it might lock up */
854 serror = ahci_scr_read(ap, SCR_ERROR);
855 ahci_scr_write(ap, SCR_ERROR, serror);
856
857 /* analyze @irq_stat */
858 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
859
860 if (irq_stat & PORT_IRQ_TF_ERR)
861 err_mask |= AC_ERR_DEV;
862
863 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
864 err_mask |= AC_ERR_HOST_BUS;
865 action |= ATA_EH_SOFTRESET;
866 }
867
868 if (irq_stat & PORT_IRQ_IF_ERR) {
869 err_mask |= AC_ERR_ATA_BUS;
870 action |= ATA_EH_SOFTRESET;
871 ata_ehi_push_desc(ehi, ", interface fatal error");
872 }
873
874 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
875 ata_ehi_hotplugged(ehi);
876 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
877 "connection status changed" : "PHY RDY changed");
878 }
879
880 if (irq_stat & PORT_IRQ_UNK_FIS) {
881 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
882
883 err_mask |= AC_ERR_HSM;
884 action |= ATA_EH_SOFTRESET;
885 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
886 unk[0], unk[1], unk[2], unk[3]);
887 }
888
889 /* okay, let's hand over to EH */
890 ehi->serror |= serror;
891 ehi->action |= action;
892
893 qc = ata_qc_from_tag(ap, ap->active_tag);
894 if (qc)
895 qc->err_mask |= err_mask;
896 else
897 ehi->err_mask |= err_mask;
898
899 if (irq_stat & PORT_IRQ_FREEZE)
900 ata_port_freeze(ap);
901 else
902 ata_port_abort(ap);
903}
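/*
 * Mapping used above, roughly: any PORT_IRQ_FREEZE condition (fatal
 * bus/interface errors, hotplug events, unknown FIS) freezes the port
 * so EH recovers it from a reset, while lesser errors merely abort the
 * commands currently in flight.
 */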
904
905static void ahci_host_intr(struct ata_port *ap)
906{
907 void __iomem *mmio = ap->host_set->mmio_base;
908 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
909 struct ata_eh_info *ehi = &ap->eh_info;
910 u32 status, qc_active;
911 int rc;
912
913 status = readl(port_mmio + PORT_IRQ_STAT);
914 writel(status, port_mmio + PORT_IRQ_STAT);
915
916 if (unlikely(status & PORT_IRQ_ERROR)) {
917 ahci_error_intr(ap, status);
918 return;
919 }
920
921 if (ap->sactive)
922 qc_active = readl(port_mmio + PORT_SCR_ACT);
923 else
924 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
925
926 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
927 if (rc > 0)
928 return;
929 if (rc < 0) {
930 ehi->err_mask |= AC_ERR_HSM;
931 ehi->action |= ATA_EH_SOFTRESET;
932 ata_port_freeze(ap);
933 return;
934 }
935
 936 /* hmmm... a spurious interrupt */
937
938 /* some devices send D2H reg with I bit set during NCQ command phase */
939 if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
940 return;
941
942 /* ignore interim PIO setup fis interrupts */
943 if (ata_tag_valid(ap->active_tag)) {
944 struct ata_queued_cmd *qc =
945 ata_qc_from_tag(ap, ap->active_tag);
946
947 if (qc && qc->tf.protocol == ATA_PROT_PIO &&
948 (status & PORT_IRQ_PIOS_FIS))
949 return;
950 }
951
952 if (ata_ratelimit())
953 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
954 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
955 status, ap->active_tag, ap->sactive);
956}
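/*
 * Completion tracking above follows the hardware model: tags of
 * in-flight NCQ commands live in SActive, all others in PORT_CMD_ISSUE.
 * Either way a bit clearing means the command finished, and
 * ata_qc_complete_multiple() completes every qc whose bit has dropped
 * out of @qc_active since the last pass.
 */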
957
958static void ahci_irq_clear(struct ata_port *ap)
959{
960 /* TODO */
961}
962
963static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
964{
965 struct ata_host_set *host_set = dev_instance;
966 struct ahci_host_priv *hpriv;
967 unsigned int i, handled = 0;
968 void __iomem *mmio;
969 u32 irq_stat, irq_ack = 0;
970
971 VPRINTK("ENTER\n");
972
973 hpriv = host_set->private_data;
974 mmio = host_set->mmio_base;
975
976 /* sigh. 0xffffffff is a valid return from h/w */
977 irq_stat = readl(mmio + HOST_IRQ_STAT);
978 irq_stat &= hpriv->port_map;
979 if (!irq_stat)
980 return IRQ_NONE;
981
982 spin_lock(&host_set->lock);
983
984 for (i = 0; i < host_set->n_ports; i++) {
985 struct ata_port *ap;
986
987 if (!(irq_stat & (1 << i)))
988 continue;
989
990 ap = host_set->ports[i];
991 if (ap) {
992 ahci_host_intr(ap);
993 VPRINTK("port %u\n", i);
994 } else {
995 VPRINTK("port %u (no irq)\n", i);
996 if (ata_ratelimit())
997 dev_printk(KERN_WARNING, host_set->dev,
998 "interrupt on disabled port %u\n", i);
999 }
1000
1001 irq_ack |= (1 << i);
1002 }
1003
1004 if (irq_ack) {
1005 writel(irq_ack, mmio + HOST_IRQ_STAT);
1006 handled = 1;
1007 }
1008
1009 spin_unlock(&host_set->lock);
1010
1011 VPRINTK("EXIT\n");
1012
1013 return IRQ_RETVAL(handled);
1014}
1015
1016static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1017{
1018 struct ata_port *ap = qc->ap;
1019 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1020
1021 if (qc->tf.protocol == ATA_PROT_NCQ)
1022 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1023 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1024 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1025
1026 return 0;
1027}
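/*
 * For NCQ the tag's SActive bit must be set before the same tag is
 * written to PORT_CMD_ISSUE, per the issue order the spec requires;
 * e.g. tag 5 writes 0x20 to both registers.
 */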
1028
1029static void ahci_freeze(struct ata_port *ap)
1030{
1031 void __iomem *mmio = ap->host_set->mmio_base;
1032 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1033
1034 /* turn IRQ off */
1035 writel(0, port_mmio + PORT_IRQ_MASK);
1036}
1037
1038static void ahci_thaw(struct ata_port *ap)
1039{
1040 void __iomem *mmio = ap->host_set->mmio_base;
1041 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1042 u32 tmp;
1043
1044 /* clear IRQ */
1045 tmp = readl(port_mmio + PORT_IRQ_STAT);
1046 writel(tmp, port_mmio + PORT_IRQ_STAT);
1047 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1048
1049 /* turn IRQ back on */
1050 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1051}
1052
1053static void ahci_error_handler(struct ata_port *ap)
1054{
1055 if (!(ap->flags & ATA_FLAG_FROZEN)) {
1056 /* restart engine */
1057 ahci_stop_engine(ap);
1058 ahci_start_engine(ap);
1059 }
1060
1061 /* perform recovery */
1062 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
1063 ahci_postreset);
1064}
1065
1066static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1067{
1068 struct ata_port *ap = qc->ap;
1069
1070 if (qc->flags & ATA_QCFLAG_FAILED)
1071 qc->err_mask |= AC_ERR_OTHER;
1072
1073 if (qc->err_mask) {
1074 /* make DMA engine forget about the failed command */
1075 ahci_stop_engine(ap);
1076 ahci_start_engine(ap);
1077 }
1078}
1079
1080static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1081 unsigned int port_idx)
1082{
1083 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1084 base = ahci_port_base_ul(base, port_idx);
1085 VPRINTK("base now==0x%lx\n", base);
1086
1087 port->cmd_addr = base;
1088 port->scr_addr = base + PORT_SCR;
1089
1090 VPRINTK("EXIT\n");
1091}
1092
1093static int ahci_host_init(struct ata_probe_ent *probe_ent)
1094{
1095 struct ahci_host_priv *hpriv = probe_ent->private_data;
1096 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1097 void __iomem *mmio = probe_ent->mmio_base;
1098 u32 tmp, cap_save;
1099 unsigned int i, j, using_dac;
1100 int rc;
1101 void __iomem *port_mmio;
1102
1103 cap_save = readl(mmio + HOST_CAP);
1104 cap_save &= ( (1<<28) | (1<<17) );
1105 cap_save |= (1 << 27);
1106
1107 /* global controller reset */
1108 tmp = readl(mmio + HOST_CTL);
1109 if ((tmp & HOST_RESET) == 0) {
1110 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1111 readl(mmio + HOST_CTL); /* flush */
1112 }
1113
1114 /* reset must complete within 1 second, or
1115 * the hardware should be considered fried.
1116 */
1117 ssleep(1);
1118
1119 tmp = readl(mmio + HOST_CTL);
1120 if (tmp & HOST_RESET) {
1121 dev_printk(KERN_ERR, &pdev->dev,
1122 "controller reset failed (0x%x)\n", tmp);
1123 return -EIO;
1124 }
1125
1126 writel(HOST_AHCI_EN, mmio + HOST_CTL);
1127 (void) readl(mmio + HOST_CTL); /* flush */
1128 writel(cap_save, mmio + HOST_CAP);
1129 writel(0xf, mmio + HOST_PORTS_IMPL);
1130 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
1131
1132 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1133 u16 tmp16;
1134
1135 pci_read_config_word(pdev, 0x92, &tmp16);
1136 tmp16 |= 0xf;
1137 pci_write_config_word(pdev, 0x92, tmp16);
1138 }
1139
1140 hpriv->cap = readl(mmio + HOST_CAP);
1141 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1142 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1143
1144 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1145 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1146
1147 using_dac = hpriv->cap & HOST_CAP_64;
1148 if (using_dac &&
1149 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1150 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1151 if (rc) {
1152 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1153 if (rc) {
1154 dev_printk(KERN_ERR, &pdev->dev,
1155 "64-bit DMA enable failed\n");
1156 return rc;
1157 }
1158 }
1159 } else {
1160 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1161 if (rc) {
1162 dev_printk(KERN_ERR, &pdev->dev,
1163 "32-bit DMA enable failed\n");
1164 return rc;
1165 }
1166 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1167 if (rc) {
1168 dev_printk(KERN_ERR, &pdev->dev,
1169 "32-bit consistent DMA enable failed\n");
1170 return rc;
1171 }
1172 }
1173
1174 for (i = 0; i < probe_ent->n_ports; i++) {
1175#if 0 /* BIOSen initialize this incorrectly */
1176 if (!(hpriv->port_map & (1 << i)))
1177 continue;
1178#endif
1179
1180 port_mmio = ahci_port_base(mmio, i);
1181 VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);
1182
1183 ahci_setup_port(&probe_ent->port[i],
1184 (unsigned long) mmio, i);
1185
1186 /* make sure port is not active */
1187 tmp = readl(port_mmio + PORT_CMD);
1188 VPRINTK("PORT_CMD 0x%x\n", tmp);
1189 if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
1190 PORT_CMD_FIS_RX | PORT_CMD_START)) {
1191 tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
1192 PORT_CMD_FIS_RX | PORT_CMD_START);
1193 writel(tmp, port_mmio + PORT_CMD);
1194 readl(port_mmio + PORT_CMD); /* flush */
1195
 1196 /* spec says 500 msecs for each bit to clear, so a single
 1197 * 500 msec wait for both is slightly optimistic.
1198 */
1199 msleep(500);
1200 }
1201
1202 writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);
1203
1204 j = 0;
1205 while (j < 100) {
1206 msleep(10);
1207 tmp = readl(port_mmio + PORT_SCR_STAT);
1208 if ((tmp & 0xf) == 0x3)
1209 break;
1210 j++;
1211 }
1212
1213 tmp = readl(port_mmio + PORT_SCR_ERR);
1214 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1215 writel(tmp, port_mmio + PORT_SCR_ERR);
1216
1217 /* ack any pending irq events for this port */
1218 tmp = readl(port_mmio + PORT_IRQ_STAT);
1219 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1220 if (tmp)
1221 writel(tmp, port_mmio + PORT_IRQ_STAT);
1222
1223 writel(1 << i, mmio + HOST_IRQ_STAT);
1224 }
1225
1226 tmp = readl(mmio + HOST_CTL);
1227 VPRINTK("HOST_CTL 0x%x\n", tmp);
1228 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1229 tmp = readl(mmio + HOST_CTL);
1230 VPRINTK("HOST_CTL 0x%x\n", tmp);
1231
1232 pci_set_master(pdev);
1233
1234 return 0;
1235}
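/*
 * Rough timing budget for the init path above: up to 1 second for
 * HOST_RESET to self-clear, then per port up to 500 msecs to quiesce a
 * running DMA engine plus a spin-up poll of at most 100 * 10 msecs
 * waiting for SStatus DET to reach 0x3.
 */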
1236
1237static void ahci_print_info(struct ata_probe_ent *probe_ent)
1238{
1239 struct ahci_host_priv *hpriv = probe_ent->private_data;
1240 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1241 void __iomem *mmio = probe_ent->mmio_base;
1242 u32 vers, cap, impl, speed;
1243 const char *speed_s;
1244 u16 cc;
1245 const char *scc_s;
1246
1247 vers = readl(mmio + HOST_VERSION);
1248 cap = hpriv->cap;
1249 impl = hpriv->port_map;
1250
1251 speed = (cap >> 20) & 0xf;
1252 if (speed == 1)
1253 speed_s = "1.5";
1254 else if (speed == 2)
1255 speed_s = "3";
1256 else
1257 speed_s = "?";
1258
1259 pci_read_config_word(pdev, 0x0a, &cc);
1260 if (cc == 0x0101)
1261 scc_s = "IDE";
1262 else if (cc == 0x0106)
1263 scc_s = "SATA";
1264 else if (cc == 0x0104)
1265 scc_s = "RAID";
1266 else
1267 scc_s = "unknown";
1268
1269 dev_printk(KERN_INFO, &pdev->dev,
1270 "AHCI %02x%02x.%02x%02x "
1271 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1272 ,
1273
1274 (vers >> 24) & 0xff,
1275 (vers >> 16) & 0xff,
1276 (vers >> 8) & 0xff,
1277 vers & 0xff,
1278
1279 ((cap >> 8) & 0x1f) + 1,
1280 (cap & 0x1f) + 1,
1281 speed_s,
1282 impl,
1283 scc_s);
1284
1285 dev_printk(KERN_INFO, &pdev->dev,
1286 "flags: "
1287 "%s%s%s%s%s%s"
1288 "%s%s%s%s%s%s%s\n"
1289 ,
1290
1291 cap & (1 << 31) ? "64bit " : "",
1292 cap & (1 << 30) ? "ncq " : "",
1293 cap & (1 << 28) ? "ilck " : "",
1294 cap & (1 << 27) ? "stag " : "",
1295 cap & (1 << 26) ? "pm " : "",
1296 cap & (1 << 25) ? "led " : "",
1297
1298 cap & (1 << 24) ? "clo " : "",
1299 cap & (1 << 19) ? "nz " : "",
1300 cap & (1 << 18) ? "only " : "",
1301 cap & (1 << 17) ? "pmp " : "",
1302 cap & (1 << 15) ? "pio " : "",
1303 cap & (1 << 14) ? "slum " : "",
1304 cap & (1 << 13) ? "part " : ""
1305 );
1306}
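/*
 * Example of the summary line this produces on a hypothetical 4-port,
 * 3 Gbps controller reporting SATA class code 0x0106:
 *
 *	ahci 0000:00:1f.2: AHCI 0001.0100 32 slots 4 ports 3 Gbps 0xf impl SATA mode
 */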
1307
1308static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1309{
1310 static int printed_version;
1311 struct ata_probe_ent *probe_ent = NULL;
1312 struct ahci_host_priv *hpriv;
1313 unsigned long base;
1314 void __iomem *mmio_base;
1315 unsigned int board_idx = (unsigned int) ent->driver_data;
1316 int have_msi, pci_dev_busy = 0;
1317 int rc;
1318
1319 VPRINTK("ENTER\n");
1320
1321 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1322
1323 if (!printed_version++)
1324 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1325
1326 rc = pci_enable_device(pdev);
1327 if (rc)
1328 return rc;
1329
1330 rc = pci_request_regions(pdev, DRV_NAME);
1331 if (rc) {
1332 pci_dev_busy = 1;
1333 goto err_out;
1334 }
1335
1336 if (pci_enable_msi(pdev) == 0)
1337 have_msi = 1;
1338 else {
1339 pci_intx(pdev, 1);
1340 have_msi = 0;
1341 }
1342
1343 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1344 if (probe_ent == NULL) {
1345 rc = -ENOMEM;
1346 goto err_out_msi;
1347 }
1348
1349 memset(probe_ent, 0, sizeof(*probe_ent));
1350 probe_ent->dev = pci_dev_to_dev(pdev);
1351 INIT_LIST_HEAD(&probe_ent->node);
1352
1353 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1354 if (mmio_base == NULL) {
1355 rc = -ENOMEM;
1356 goto err_out_free_ent;
1357 }
1358 base = (unsigned long) mmio_base;
1359
1360 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1361 if (!hpriv) {
1362 rc = -ENOMEM;
1363 goto err_out_iounmap;
1364 }
1365 memset(hpriv, 0, sizeof(*hpriv));
1366
1367 probe_ent->sht = ahci_port_info[board_idx].sht;
1368 probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
1369 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1370 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1371 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1372
1373 probe_ent->irq = pdev->irq;
1374 probe_ent->irq_flags = SA_SHIRQ;
1375 probe_ent->mmio_base = mmio_base;
1376 probe_ent->private_data = hpriv;
1377
1378 if (have_msi)
1379 hpriv->flags |= AHCI_FLAG_MSI;
1380
1381 /* JMicron-specific fixup: make sure we're in AHCI mode */
1382 if (pdev->vendor == 0x197b)
1383 pci_write_config_byte(pdev, 0x41, 0xa1);
1384
1385 /* initialize adapter */
1386 rc = ahci_host_init(probe_ent);
1387 if (rc)
1388 goto err_out_hpriv;
1389
1390 if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
1391 (hpriv->cap & HOST_CAP_NCQ))
1392 probe_ent->host_flags |= ATA_FLAG_NCQ;
1393
1394 ahci_print_info(probe_ent);
1395
1396 /* FIXME: check ata_device_add return value */
1397 ata_device_add(probe_ent);
1398 kfree(probe_ent);
1399
1400 return 0;
1401
1402err_out_hpriv:
1403 kfree(hpriv);
1404err_out_iounmap:
1405 pci_iounmap(pdev, mmio_base);
1406err_out_free_ent:
1407 kfree(probe_ent);
1408err_out_msi:
1409 if (have_msi)
1410 pci_disable_msi(pdev);
1411 else
1412 pci_intx(pdev, 0);
1413 pci_release_regions(pdev);
1414err_out:
1415 if (!pci_dev_busy)
1416 pci_disable_device(pdev);
1417 return rc;
1418}
1419
1420static void ahci_remove_one (struct pci_dev *pdev)
1421{
1422 struct device *dev = pci_dev_to_dev(pdev);
1423 struct ata_host_set *host_set = dev_get_drvdata(dev);
1424 struct ahci_host_priv *hpriv = host_set->private_data;
1425 unsigned int i;
1426 int have_msi;
1427
1428 for (i = 0; i < host_set->n_ports; i++)
1429 ata_port_detach(host_set->ports[i]);
1430
1431 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1432 free_irq(host_set->irq, host_set);
1433
1434 for (i = 0; i < host_set->n_ports; i++) {
1435 struct ata_port *ap = host_set->ports[i];
1436
1437 ata_scsi_release(ap->host);
1438 scsi_host_put(ap->host);
1439 }
1440
1441 kfree(hpriv);
1442 pci_iounmap(pdev, host_set->mmio_base);
1443 kfree(host_set);
1444
1445 if (have_msi)
1446 pci_disable_msi(pdev);
1447 else
1448 pci_intx(pdev, 0);
1449 pci_release_regions(pdev);
1450 pci_disable_device(pdev);
1451 dev_set_drvdata(dev, NULL);
1452}
1453
1454static int __init ahci_init(void)
1455{
1456 return pci_module_init(&ahci_pci_driver);
1457}
1458
1459static void __exit ahci_exit(void)
1460{
1461 pci_unregister_driver(&ahci_pci_driver);
1462}
1463
1464
1465MODULE_AUTHOR("Jeff Garzik");
1466MODULE_DESCRIPTION("AHCI SATA low-level driver");
1467MODULE_LICENSE("GPL");
1468MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1469MODULE_VERSION(DRV_VERSION);
1470
1471module_init(ahci_init);
1472module_exit(ahci_exit);
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index d754b3267863..1ac119733bac 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -65,7 +65,7 @@ aic7770_map_int(struct ahc_softc *ahc, u_int irq)
 
 	shared = 0;
 	if ((ahc->flags & AHC_EDGE_INTERRUPT) == 0)
-		shared = SA_SHIRQ;
+		shared = IRQF_SHARED;
 
 	error = request_irq(irq, ahc_linux_isr, shared, "aic7xxx", ahc);
 	if (error == 0)
@@ -132,7 +132,8 @@ static struct eisa_device_id aic7770_ids[] = {
 	{ "ADP7770", 5 },		/* AIC7770 generic */
 	{ "" }
 };
+MODULE_DEVICE_TABLE(eisa, aic7770_ids);
 
 static struct eisa_driver aic7770_driver = {
 	.id_table	= aic7770_ids,
 	.driver	= {
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index eb7745692682..df3346b5caf8 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -1487,6 +1487,7 @@ typedef enum {
 } ahd_queue_alg;
 
 void	ahd_set_tags(struct ahd_softc *ahd,
+		     struct scsi_cmnd *cmd,
 		     struct ahd_devinfo *devinfo,
 		     ahd_queue_alg alg);
 
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 801fc81d0b20..653818d2f802 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -1090,7 +1090,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
 
 			/* Notify XPT */
 			ahd_send_async(ahd, devinfo.channel, devinfo.target,
-				       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
+				       CAM_LUN_WILDCARD, AC_SENT_BDR);
 
 			/*
 			 * Allow the sequencer to continue with
@@ -3062,7 +3062,7 @@ ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 		tinfo->curr.ppr_options = ppr_options;
 
 		ahd_send_async(ahd, devinfo->channel, devinfo->target,
-			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
 		if (bootverbose) {
 			if (offset != 0) {
 				int options;
@@ -3184,7 +3184,7 @@ ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 
 		tinfo->curr.width = width;
 		ahd_send_async(ahd, devinfo->channel, devinfo->target,
-			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
 		if (bootverbose) {
 			printf("%s: target %d using %dbit transfers\n",
 			       ahd_name(ahd), devinfo->target,
@@ -3211,12 +3211,14 @@ ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
  * Update the current state of tagged queuing for a given target.
  */
 void
-ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
-	     ahd_queue_alg alg)
+ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+	     struct ahd_devinfo *devinfo, ahd_queue_alg alg)
 {
-	ahd_platform_set_tags(ahd, devinfo, alg);
+	struct scsi_device *sdev = cmd->device;
+
+	ahd_platform_set_tags(ahd, sdev, devinfo, alg);
 	ahd_send_async(ahd, devinfo->channel, devinfo->target,
-		       devinfo->lun, AC_TRANSFER_NEG, &alg);
+		       devinfo->lun, AC_TRANSFER_NEG);
 }
 
 static void
@@ -4746,7 +4748,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
 			printf("(%s:%c:%d:%d): refuses tagged commands.  "
 			       "Performing non-tagged I/O\n", ahd_name(ahd),
 			       devinfo->channel, devinfo->target, devinfo->lun);
-			ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
+			ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
 			mask = ~0x23;
 		} else {
 			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
@@ -4754,7 +4756,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
 			       ahd_name(ahd), devinfo->channel, devinfo->target,
 			       devinfo->lun, tag_type == MSG_ORDERED_TASK
 			       ? "ordered" : "head of queue");
-			ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
+			ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
 			mask = ~0x03;
 		}
 
@@ -5098,7 +5100,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 
 	if (status != CAM_SEL_TIMEOUT)
 		ahd_send_async(ahd, devinfo->channel, devinfo->target,
-			       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
+			       CAM_LUN_WILDCARD, AC_SENT_BDR);
 
 	if (message != NULL && bootverbose)
 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
@@ -7287,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
 	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
 }
 
-void
+static void
 ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
 {
 	cam_status ostat;
@@ -7952,7 +7954,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
 #endif
 	/* Notify the XPT that a bus reset occurred */
 	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
-		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
+		       CAM_LUN_WILDCARD, AC_BUS_RESET);
 
 	/*
 	 * Revert to async/narrow transfers until we renegotiate.
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index e0ccdf362200..c7eeaced324a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
243static uint32_t aic79xx_no_reset; 243static uint32_t aic79xx_no_reset;
244 244
245/* 245/*
246 * Certain PCI motherboards will scan PCI devices from highest to lowest,
247 * others scan from lowest to highest, and they tend to do all kinds of
248 * strange things when they come into contact with PCI bridge chips. The
249 * net result of all this is that the PCI card that is actually used to boot
250 * the machine is very hard to detect. Most motherboards go from lowest
251 * PCI slot number to highest, and the first SCSI controller found is the
252 * one you boot from. The only exceptions to this are when a controller
253 * has its BIOS disabled. So, we by default sort all of our SCSI controllers
254 * from lowest PCI slot number to highest PCI slot number. We also force
255 * all controllers with their BIOS disabled to the end of the list. This
256 * works on *almost* all computers. Where it doesn't work, we have this
257 * option. Setting this option to non-0 will reverse the order of the sort
258 * to highest first, then lowest, but will still leave cards with their BIOS
259 * disabled at the very end. That should fix everyone up unless there are
260 * really strange cirumstances.
261 */
262static uint32_t aic79xx_reverse_scan;
263
264/*
265 * Should we force EXTENDED translation on a controller. 246 * Should we force EXTENDED translation on a controller.
266 * 0 == Use whatever is in the SEEPROM or default to off 247 * 0 == Use whatever is in the SEEPROM or default to off
267 * 1 == Use whatever is in the SEEPROM or default to on 248 * 1 == Use whatever is in the SEEPROM or default to on
@@ -340,7 +321,7 @@ MODULE_LICENSE("Dual BSD/GPL");
340MODULE_VERSION(AIC79XX_DRIVER_VERSION); 321MODULE_VERSION(AIC79XX_DRIVER_VERSION);
341module_param(aic79xx, charp, 0444); 322module_param(aic79xx, charp, 0444);
342MODULE_PARM_DESC(aic79xx, 323MODULE_PARM_DESC(aic79xx,
343"period delimited, options string.\n" 324"period-delimited options string:\n"
344" verbose Enable verbose/diagnostic logging\n" 325" verbose Enable verbose/diagnostic logging\n"
345" allow_memio Allow device registers to be memory mapped\n" 326" allow_memio Allow device registers to be memory mapped\n"
346" debug Bitmask of debug values to enable\n" 327" debug Bitmask of debug values to enable\n"
@@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx,
350" periodically to prevent tag starvation.\n" 331" periodically to prevent tag starvation.\n"
351" This may be required by some older disk\n" 332" This may be required by some older disk\n"
352" or drives/RAID arrays.\n" 333" or drives/RAID arrays.\n"
353" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
354" tag_info:<tag_str> Set per-target tag depth\n" 334" tag_info:<tag_str> Set per-target tag depth\n"
355" global_tag_depth:<int> Global tag depth for all targets on all buses\n" 335" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
356" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" 336" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
@@ -366,7 +346,7 @@ MODULE_PARM_DESC(aic79xx,
366" Shorten the selection timeout to 128ms\n" 346" Shorten the selection timeout to 128ms\n"
367"\n" 347"\n"
368" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" 348" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
369"\n"); 349);
370 350
371static void ahd_linux_handle_scsi_status(struct ahd_softc *, 351static void ahd_linux_handle_scsi_status(struct ahd_softc *,
372 struct scsi_device *, 352 struct scsi_device *,
@@ -484,7 +464,6 @@ ahd_linux_target_alloc(struct scsi_target *starget)
484 struct seeprom_config *sc = ahd->seep_config; 464 struct seeprom_config *sc = ahd->seep_config;
485 unsigned long flags; 465 unsigned long flags;
486 struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget); 466 struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
487 struct ahd_linux_target *targ = scsi_transport_target_data(starget);
488 struct ahd_devinfo devinfo; 467 struct ahd_devinfo devinfo;
489 struct ahd_initiator_tinfo *tinfo; 468 struct ahd_initiator_tinfo *tinfo;
490 struct ahd_tmode_tstate *tstate; 469 struct ahd_tmode_tstate *tstate;
@@ -495,7 +474,6 @@ ahd_linux_target_alloc(struct scsi_target *starget)
495 BUG_ON(*ahd_targp != NULL); 474 BUG_ON(*ahd_targp != NULL);
496 475
497 *ahd_targp = starget; 476 *ahd_targp = starget;
498 memset(targ, 0, sizeof(*targ));
499 477
500 if (sc) { 478 if (sc) {
501 int flags = sc->device_flags[starget->id]; 479 int flags = sc->device_flags[starget->id];
@@ -551,15 +529,11 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
551{ 529{
552 struct ahd_softc *ahd = 530 struct ahd_softc *ahd =
553 *((struct ahd_softc **)sdev->host->hostdata); 531 *((struct ahd_softc **)sdev->host->hostdata);
554 struct scsi_target *starget = sdev->sdev_target;
555 struct ahd_linux_target *targ = scsi_transport_target_data(starget);
556 struct ahd_linux_device *dev; 532 struct ahd_linux_device *dev;
557 533
558 if (bootverbose) 534 if (bootverbose)
559 printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id); 535 printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
560 536
561 BUG_ON(targ->sdev[sdev->lun] != NULL);
562
563 dev = scsi_transport_device_data(sdev); 537 dev = scsi_transport_device_data(sdev);
564 memset(dev, 0, sizeof(*dev)); 538 memset(dev, 0, sizeof(*dev));
565 539
@@ -576,8 +550,6 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
576 */ 550 */
577 dev->maxtags = 0; 551 dev->maxtags = 0;
578 552
579 targ->sdev[sdev->lun] = sdev;
580
581 return (0); 553 return (0);
582} 554}
583 555
@@ -599,23 +571,6 @@ ahd_linux_slave_configure(struct scsi_device *sdev)
599 return 0; 571 return 0;
600} 572}
601 573
602static void
603ahd_linux_slave_destroy(struct scsi_device *sdev)
604{
605 struct ahd_softc *ahd;
606 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
607 struct ahd_linux_target *targ = scsi_transport_target_data(sdev->sdev_target);
608
609 ahd = *((struct ahd_softc **)sdev->host->hostdata);
610 if (bootverbose)
611 printf("%s: Slave Destroy %d\n", ahd_name(ahd), sdev->id);
612
613 BUG_ON(dev->active);
614
615 targ->sdev[sdev->lun] = NULL;
616
617}
618
619#if defined(__i386__) 574#if defined(__i386__)
620/* 575/*
621 * Return the disk geometry for the given SCSI device. 576 * Return the disk geometry for the given SCSI device.
@@ -822,7 +777,6 @@ struct scsi_host_template aic79xx_driver_template = {
822 .use_clustering = ENABLE_CLUSTERING, 777 .use_clustering = ENABLE_CLUSTERING,
823 .slave_alloc = ahd_linux_slave_alloc, 778 .slave_alloc = ahd_linux_slave_alloc,
824 .slave_configure = ahd_linux_slave_configure, 779 .slave_configure = ahd_linux_slave_configure,
825 .slave_destroy = ahd_linux_slave_destroy,
826 .target_alloc = ahd_linux_target_alloc, 780 .target_alloc = ahd_linux_target_alloc,
827 .target_destroy = ahd_linux_target_destroy, 781 .target_destroy = ahd_linux_target_destroy,
828}; 782};
@@ -1057,7 +1011,6 @@ aic79xx_setup(char *s)
1057#ifdef AHD_DEBUG 1011#ifdef AHD_DEBUG
1058 { "debug", &ahd_debug }, 1012 { "debug", &ahd_debug },
1059#endif 1013#endif
1060 { "reverse_scan", &aic79xx_reverse_scan },
1061 { "periodic_otag", &aic79xx_periodic_otag }, 1014 { "periodic_otag", &aic79xx_periodic_otag },
1062 { "pci_parity", &aic79xx_pci_parity }, 1015 { "pci_parity", &aic79xx_pci_parity },
1063 { "seltime", &aic79xx_seltime }, 1016 { "seltime", &aic79xx_seltime },
@@ -1249,20 +1202,13 @@ void
1249ahd_platform_free(struct ahd_softc *ahd) 1202ahd_platform_free(struct ahd_softc *ahd)
1250{ 1203{
1251 struct scsi_target *starget; 1204 struct scsi_target *starget;
1252 int i, j; 1205 int i;
1253 1206
1254 if (ahd->platform_data != NULL) { 1207 if (ahd->platform_data != NULL) {
1255 /* destroy all of the device and target objects */ 1208 /* destroy all of the device and target objects */
1256 for (i = 0; i < AHD_NUM_TARGETS; i++) { 1209 for (i = 0; i < AHD_NUM_TARGETS; i++) {
1257 starget = ahd->platform_data->starget[i]; 1210 starget = ahd->platform_data->starget[i];
1258 if (starget != NULL) { 1211 if (starget != NULL) {
1259 for (j = 0; j < AHD_NUM_LUNS; j++) {
1260 struct ahd_linux_target *targ =
1261 scsi_transport_target_data(starget);
1262 if (targ->sdev[j] == NULL)
1263 continue;
1264 targ->sdev[j] = NULL;
1265 }
1266 ahd->platform_data->starget[i] = NULL; 1212 ahd->platform_data->starget[i] = NULL;
1267 } 1213 }
1268 } 1214 }
@@ -1318,20 +1264,13 @@ ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
1318} 1264}
1319 1265
1320void 1266void
1321ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 1267ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
1322 ahd_queue_alg alg) 1268 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
1323{ 1269{
1324 struct scsi_target *starget;
1325 struct ahd_linux_target *targ;
1326 struct ahd_linux_device *dev; 1270 struct ahd_linux_device *dev;
1327 struct scsi_device *sdev;
1328 int was_queuing; 1271 int was_queuing;
1329 int now_queuing; 1272 int now_queuing;
1330 1273
1331 starget = ahd->platform_data->starget[devinfo->target];
1332 targ = scsi_transport_target_data(starget);
1333 BUG_ON(targ == NULL);
1334 sdev = targ->sdev[devinfo->lun];
1335 if (sdev == NULL) 1274 if (sdev == NULL)
1336 return; 1275 return;
1337 1276
@@ -1467,11 +1406,15 @@ ahd_linux_device_queue_depth(struct scsi_device *sdev)
1467 tags = ahd_linux_user_tagdepth(ahd, &devinfo); 1406 tags = ahd_linux_user_tagdepth(ahd, &devinfo);
1468 if (tags != 0 && sdev->tagged_supported != 0) { 1407 if (tags != 0 && sdev->tagged_supported != 0) {
1469 1408
1470 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED); 1409 ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED);
1410 ahd_send_async(ahd, devinfo.channel, devinfo.target,
1411 devinfo.lun, AC_TRANSFER_NEG);
1471 ahd_print_devinfo(ahd, &devinfo); 1412 ahd_print_devinfo(ahd, &devinfo);
1472 printf("Tagged Queuing enabled. Depth %d\n", tags); 1413 printf("Tagged Queuing enabled. Depth %d\n", tags);
1473 } else { 1414 } else {
1474 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_NONE); 1415 ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
1416 ahd_send_async(ahd, devinfo.channel, devinfo.target,
1417 devinfo.lun, AC_TRANSFER_NEG);
1475 } 1418 }
1476} 1419}
1477 1420
@@ -1629,7 +1572,7 @@ ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
1629 1572
1630void 1573void
1631ahd_send_async(struct ahd_softc *ahd, char channel, 1574ahd_send_async(struct ahd_softc *ahd, char channel,
1632 u_int target, u_int lun, ac_code code, void *arg) 1575 u_int target, u_int lun, ac_code code)
1633{ 1576{
1634 switch (code) { 1577 switch (code) {
1635 case AC_TRANSFER_NEG: 1578 case AC_TRANSFER_NEG:
@@ -1956,7 +1899,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1956 } 1899 }
1957 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); 1900 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
1958 ahd_set_scsi_status(scb, SCSI_STATUS_OK); 1901 ahd_set_scsi_status(scb, SCSI_STATUS_OK);
1959 ahd_platform_set_tags(ahd, &devinfo, 1902 ahd_platform_set_tags(ahd, sdev, &devinfo,
1960 (dev->flags & AHD_DEV_Q_BASIC) 1903 (dev->flags & AHD_DEV_Q_BASIC)
1961 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); 1904 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
1962 break; 1905 break;
@@ -1966,7 +1909,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1966 * as if the target returned BUSY SCSI status. 1909 * as if the target returned BUSY SCSI status.
1967 */ 1910 */
1968 dev->openings = 1; 1911 dev->openings = 1;
1969 ahd_platform_set_tags(ahd, &devinfo, 1912 ahd_platform_set_tags(ahd, sdev, &devinfo,
1970 (dev->flags & AHD_DEV_Q_BASIC) 1913 (dev->flags & AHD_DEV_Q_BASIC)
1971 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); 1914 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
1972 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY); 1915 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
@@ -2778,8 +2721,6 @@ ahd_linux_init(void)
2778 if (!ahd_linux_transport_template) 2721 if (!ahd_linux_transport_template)
2779 return -ENODEV; 2722 return -ENODEV;
2780 2723
2781 scsi_transport_reserve_target(ahd_linux_transport_template,
2782 sizeof(struct ahd_linux_target));
2783 scsi_transport_reserve_device(ahd_linux_transport_template, 2724 scsi_transport_reserve_device(ahd_linux_transport_template,
2784 sizeof(struct ahd_linux_device)); 2725 sizeof(struct ahd_linux_device));
2785 2726
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 2b8331649eeb..601340d84410 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -42,7 +42,6 @@
42#ifndef _AIC79XX_LINUX_H_ 42#ifndef _AIC79XX_LINUX_H_
43#define _AIC79XX_LINUX_H_ 43#define _AIC79XX_LINUX_H_
44 44
45#include <linux/config.h>
46#include <linux/types.h> 45#include <linux/types.h>
47#include <linux/blkdev.h> 46#include <linux/blkdev.h>
48#include <linux/delay.h> 47#include <linux/delay.h>
@@ -94,7 +93,6 @@
94#endif 93#endif
95 94
96/********************************** Misc Macros *******************************/ 95/********************************** Misc Macros *******************************/
97#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
98#define powerof2(x) ((((x)-1)&(x))==0) 96#define powerof2(x) ((((x)-1)&(x))==0)
99 97
100/************************* Forward Declarations *******************************/ 98/************************* Forward Declarations *******************************/
@@ -263,7 +261,6 @@ typedef enum {
263 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ 261 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
264} ahd_linux_dev_flags; 262} ahd_linux_dev_flags;
265 263
266struct ahd_linux_target;
267struct ahd_linux_device { 264struct ahd_linux_device {
268 TAILQ_ENTRY(ahd_linux_device) links; 265 TAILQ_ENTRY(ahd_linux_device) links;
269 266
@@ -343,12 +340,6 @@ struct ahd_linux_device {
343#define AHD_OTAG_THRESH 500 340#define AHD_OTAG_THRESH 500
344}; 341};
345 342
346struct ahd_linux_target {
347 struct scsi_device *sdev[AHD_NUM_LUNS];
348 struct ahd_transinfo last_tinfo;
349 struct ahd_softc *ahd;
350};
351
352/********************* Definitions Required by the Core ***********************/ 343/********************* Definitions Required by the Core ***********************/
353/* 344/*
354 * Number of SG segments we require. So long as the S/G segments for 345 * Number of SG segments we require. So long as the S/G segments for
@@ -865,7 +856,7 @@ ahd_freeze_scb(struct scb *scb)
865 } 856 }
866} 857}
867 858
868void ahd_platform_set_tags(struct ahd_softc *ahd, 859void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
869 struct ahd_devinfo *devinfo, ahd_queue_alg); 860 struct ahd_devinfo *devinfo, ahd_queue_alg);
870int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, 861int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
871 char channel, int lun, u_int tag, 862 char channel, int lun, u_int tag,
@@ -874,7 +865,7 @@ irqreturn_t
874 ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs); 865 ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
875void ahd_done(struct ahd_softc*, struct scb*); 866void ahd_done(struct ahd_softc*, struct scb*);
876void ahd_send_async(struct ahd_softc *, char channel, 867void ahd_send_async(struct ahd_softc *, char channel,
877 u_int target, u_int lun, ac_code, void *); 868 u_int target, u_int lun, ac_code);
878void ahd_print_path(struct ahd_softc *, struct scb *); 869void ahd_print_path(struct ahd_softc *, struct scb *);
879 870
880#ifdef CONFIG_PCI 871#ifdef CONFIG_PCI
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index ebbf7e4ff4cc..50a41eda580e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -342,7 +342,7 @@ ahd_pci_map_int(struct ahd_softc *ahd)
342 int error; 342 int error;
343 343
344 error = request_irq(ahd->dev_softc->irq, ahd_linux_isr, 344 error = request_irq(ahd->dev_softc->irq, ahd_linux_isr,
345 SA_SHIRQ, "aic79xx", ahd); 345 IRQF_SHARED, "aic79xx", ahd);
346 if (!error) 346 if (!error)
347 ahd->platform_data->irq = ahd->dev_softc->irq; 347 ahd->platform_data->irq = ahd->dev_softc->irq;
348 348
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 24fd59a230bf..c5f0ee591509 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -47,7 +47,7 @@ static int copy_info(struct info_str *info, char *fmt, ...);
47static void ahd_dump_target_state(struct ahd_softc *ahd, 47static void ahd_dump_target_state(struct ahd_softc *ahd,
48 struct info_str *info, 48 struct info_str *info,
49 u_int our_id, char channel, 49 u_int our_id, char channel,
50 u_int target_id, u_int target_offset); 50 u_int target_id);
51static void ahd_dump_device_state(struct info_str *info, 51static void ahd_dump_device_state(struct info_str *info,
52 struct scsi_device *sdev); 52 struct scsi_device *sdev);
53static int ahd_proc_write_seeprom(struct ahd_softc *ahd, 53static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
@@ -204,10 +204,8 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
204 204
205static void 205static void
206ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info, 206ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
207 u_int our_id, char channel, u_int target_id, 207 u_int our_id, char channel, u_int target_id)
208 u_int target_offset)
209{ 208{
210 struct ahd_linux_target *targ;
211 struct scsi_target *starget; 209 struct scsi_target *starget;
212 struct ahd_initiator_tinfo *tinfo; 210 struct ahd_initiator_tinfo *tinfo;
213 struct ahd_tmode_tstate *tstate; 211 struct ahd_tmode_tstate *tstate;
@@ -218,10 +216,9 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
218 copy_info(info, "Target %d Negotiation Settings\n", target_id); 216 copy_info(info, "Target %d Negotiation Settings\n", target_id);
219 copy_info(info, "\tUser: "); 217 copy_info(info, "\tUser: ");
220 ahd_format_transinfo(info, &tinfo->user); 218 ahd_format_transinfo(info, &tinfo->user);
221 starget = ahd->platform_data->starget[target_offset]; 219 starget = ahd->platform_data->starget[target_id];
222 if (starget == NULL) 220 if (starget == NULL)
223 return; 221 return;
224 targ = scsi_transport_target_data(starget);
225 222
226 copy_info(info, "\tGoal: "); 223 copy_info(info, "\tGoal: ");
227 ahd_format_transinfo(info, &tinfo->goal); 224 ahd_format_transinfo(info, &tinfo->goal);
@@ -231,7 +228,7 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
231 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 228 for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
232 struct scsi_device *dev; 229 struct scsi_device *dev;
233 230
234 dev = targ->sdev[lun]; 231 dev = scsi_device_lookup_by_target(starget, lun);
235 232
236 if (dev == NULL) 233 if (dev == NULL)
237 continue; 234 continue;
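scsi_device_lookup_by_target() returns the device with its reference count
raised, so each hit must be balanced with scsi_device_put() once the state
has been dumped (assumed to happen in the elided remainder of the loop
body). The idiom in isolation:

	/* Sketch: reference-counted per-LUN walk of a scsi_target. */
	for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
		struct scsi_device *dev;

		dev = scsi_device_lookup_by_target(starget, lun);
		if (dev == NULL)
			continue;
		ahd_dump_device_state(info, dev);	/* use it...           */
		scsi_device_put(dev);			/* ...then drop the ref */
	}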
@@ -355,7 +352,7 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
355 copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n", 352 copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
356 ahd->scb_data.numscbs, AHD_NSEG); 353 ahd->scb_data.numscbs, AHD_NSEG);
357 354
358 max_targ = 15; 355 max_targ = 16;
359 356
360 if (ahd->seep_config == NULL) 357 if (ahd->seep_config == NULL)
361 copy_info(&info, "No Serial EEPROM\n"); 358 copy_info(&info, "No Serial EEPROM\n");
@@ -373,12 +370,12 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
373 copy_info(&info, "\n"); 370 copy_info(&info, "\n");
374 371
375 if ((ahd->features & AHD_WIDE) == 0) 372 if ((ahd->features & AHD_WIDE) == 0)
376 max_targ = 7; 373 max_targ = 8;
377 374
378 for (i = 0; i <= max_targ; i++) { 375 for (i = 0; i < max_targ; i++) {
379 376
380 ahd_dump_target_state(ahd, &info, ahd->our_id, 'A', 377 ahd_dump_target_state(ahd, &info, ahd->our_id, 'A',
381 /*target_id*/i, /*target_offset*/i); 378 /*target_id*/i);
382 } 379 }
383 retval = info.pos > info.offset ? info.pos - info.offset : 0; 380 retval = info.pos > info.offset ? info.pos - info.offset : 0;
384done: 381done:
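max_targ now holds a target count (16 wide, 8 narrow) rather than a maximum
index, and the loop bound changes from inclusive to exclusive to match; both
forms visit the same targets. The equivalent shape in isolation:

	/* The old "i <= 15" and the new "i < 16" both walk targets
	 * 0..15 on a wide bus, 0..7 on a narrow one. */
	max_targ = (ahd->features & AHD_WIDE) ? 16 : 8;
	for (i = 0; i < max_targ; i++)
		ahd_dump_target_state(ahd, &info, ahd->our_id, 'A', i);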
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index debf3e2a0798..64c8b88a429f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -341,7 +341,7 @@ MODULE_LICENSE("Dual BSD/GPL");
341MODULE_VERSION(AIC7XXX_DRIVER_VERSION); 341MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
342module_param(aic7xxx, charp, 0444); 342module_param(aic7xxx, charp, 0444);
343MODULE_PARM_DESC(aic7xxx, 343MODULE_PARM_DESC(aic7xxx,
344"period delimited, options string.\n" 344"period-delimited options string:\n"
345" verbose Enable verbose/diagnostic logging\n" 345" verbose Enable verbose/diagnostic logging\n"
346" allow_memio Allow device registers to be memory mapped\n" 346" allow_memio Allow device registers to be memory mapped\n"
347" debug Bitmask of debug values to enable\n" 347" debug Bitmask of debug values to enable\n"
@@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx,
353" periodically to prevent tag starvation.\n" 353" periodically to prevent tag starvation.\n"
354" This may be required by some older disk\n" 354" This may be required by some older disk\n"
355" drives or RAID arrays.\n" 355" drives or RAID arrays.\n"
356" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
357" tag_info:<tag_str> Set per-target tag depth\n" 356" tag_info:<tag_str> Set per-target tag depth\n"
358" global_tag_depth:<int> Global tag depth for every target\n" 357" global_tag_depth:<int> Global tag depth for every target\n"
359" on every bus\n" 358" on every bus\n"
@@ -2540,15 +2539,28 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
2540static void ahc_linux_get_signalling(struct Scsi_Host *shost) 2539static void ahc_linux_get_signalling(struct Scsi_Host *shost)
2541{ 2540{
2542 struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; 2541 struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
2543 u8 mode = ahc_inb(ahc, SBLKCTL); 2542 unsigned long flags;
2543 u8 mode;
2544 2544
2545 if (mode & ENAB40) 2545 if (!(ahc->features & AHC_ULTRA2)) {
2546 spi_signalling(shost) = SPI_SIGNAL_LVD; 2546 /* non-LVD chipset, may not have SBLKCTL reg */
2547 else if (mode & ENAB20)
2548 spi_signalling(shost) = 2547 spi_signalling(shost) =
2549 ahc->features & AHC_HVD ? 2548 ahc->features & AHC_HVD ?
2550 SPI_SIGNAL_HVD : 2549 SPI_SIGNAL_HVD :
2551 SPI_SIGNAL_SE; 2550 SPI_SIGNAL_SE;
2551 return;
2552 }
2553
2554 ahc_lock(ahc, &flags);
2555 ahc_pause(ahc);
2556 mode = ahc_inb(ahc, SBLKCTL);
2557 ahc_unpause(ahc);
2558 ahc_unlock(ahc, &flags);
2559
2560 if (mode & ENAB40)
2561 spi_signalling(shost) = SPI_SIGNAL_LVD;
2562 else if (mode & ENAB20)
2563 spi_signalling(shost) = SPI_SIGNAL_SE;
2552 else 2564 else
2553 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; 2565 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
2554} 2566}
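The hunk above answers pre-Ultra2 chips, which may lack the SBLKCTL
register entirely, from the feature bits, and brackets the actual register
read with the host lock and a pause/unpause pair so the sequencer is halted
while the register is sampled. The pattern in isolation, assuming the
ahc_lock/ahc_pause helpers shown above:

	/* Sketch: safely sampling a sequencer-owned register. */
	unsigned long flags;
	u8 sblkctl;

	ahc_lock(ahc, &flags);		/* exclude the interrupt handler */
	ahc_pause(ahc);			/* halt the sequencer first      */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	ahc_unpause(ahc);
	ahc_unlock(ahc, &flags);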
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index a20b08c9ff15..d42a71ee076d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -59,7 +59,6 @@
59#ifndef _AIC7XXX_LINUX_H_ 59#ifndef _AIC7XXX_LINUX_H_
60#define _AIC7XXX_LINUX_H_ 60#define _AIC7XXX_LINUX_H_
61 61
62#include <linux/config.h>
63#include <linux/types.h> 62#include <linux/types.h>
64#include <linux/blkdev.h> 63#include <linux/blkdev.h>
65#include <linux/delay.h> 64#include <linux/delay.h>
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0c9c2f400bf6..7e42f07a27f3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -375,7 +375,7 @@ ahc_pci_map_int(struct ahc_softc *ahc)
375 int error; 375 int error;
376 376
377 error = request_irq(ahc->dev_softc->irq, ahc_linux_isr, 377 error = request_irq(ahc->dev_softc->irq, ahc_linux_isr,
378 SA_SHIRQ, "aic7xxx", ahc); 378 IRQF_SHARED, "aic7xxx", ahc);
379 if (error == 0) 379 if (error == 0)
380 ahc->platform_data->irq = ahc->dev_softc->irq; 380 ahc->platform_data->irq = ahc->dev_softc->irq;
381 381
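The SA_* interrupt flags were renamed to IRQF_* in kernel 2.6.18; only the
names change, the shared-IRQ semantics are identical. The mapping used by
the hunks in this patch:

	/* Old name        New name        (behavior identical)
	 * SA_SHIRQ     -> IRQF_SHARED     shared interrupt line
	 * SA_INTERRUPT -> IRQF_DISABLED   run handler with IRQs off
	 */
	error = request_irq(ahc->dev_softc->irq, ahc_linux_isr,
			    IRQF_SHARED, "aic7xxx", ahc);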
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 8c91fda6482c..b98c5c1056c3 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -14,6 +14,8 @@ LIBS= -ldb
14clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) 14clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
15# Override default kernel CFLAGS. This is a userland app. 15# Override default kernel CFLAGS. This is a userland app.
16AICASM_CFLAGS:= -I/usr/include -I. 16AICASM_CFLAGS:= -I/usr/include -I.
17LEX= flex
18YACC= bison
17YFLAGS= -d 19YFLAGS= -d
18 20
19NOMAN= noman 21NOMAN= noman
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 5dba1c63122e..5dcef48d414f 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -249,8 +249,6 @@
249#include <linux/stat.h> 249#include <linux/stat.h>
250#include <linux/slab.h> /* for kmalloc() */ 250#include <linux/slab.h> /* for kmalloc() */
251 251
252#include <linux/config.h> /* for CONFIG_PCI */
253
254#define AIC7XXX_C_VERSION "5.2.6" 252#define AIC7XXX_C_VERSION "5.2.6"
255 253
256#define ALL_TARGETS -1 254#define ALL_TARGETS -1
@@ -8322,11 +8320,11 @@ aic7xxx_register(struct scsi_host_template *template, struct aic7xxx_host *p,
8322 } 8320 }
8323 else 8321 else
8324 { 8322 {
8325 result = (request_irq(p->irq, do_aic7xxx_isr, SA_SHIRQ, 8323 result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_SHARED,
8326 "aic7xxx", p)); 8324 "aic7xxx", p));
8327 if (result < 0) 8325 if (result < 0)
8328 { 8326 {
8329 result = (request_irq(p->irq, do_aic7xxx_isr, SA_INTERRUPT | SA_SHIRQ, 8327 result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_DISABLED | IRQF_SHARED,
8330 "aic7xxx", p)); 8328 "aic7xxx", p));
8331 } 8329 }
8332 } 8330 }
@@ -9196,7 +9194,7 @@ aic7xxx_detect(struct scsi_host_template *template)
9196 for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++) 9194 for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++)
9197 { 9195 {
9198 pdev = NULL; 9196 pdev = NULL;
9199 while ((pdev = pci_find_device(aic_pdevs[i].vendor_id, 9197 while ((pdev = pci_get_device(aic_pdevs[i].vendor_id,
9200 aic_pdevs[i].device_id, 9198 aic_pdevs[i].device_id,
9201 pdev))) { 9199 pdev))) {
9202 if (pci_enable_device(pdev)) 9200 if (pci_enable_device(pdev))
@@ -9653,6 +9651,9 @@ aic7xxx_detect(struct scsi_host_template *template)
9653 */ 9651 */
9654 aic7xxx_configure_bugs(temp_p); 9652 aic7xxx_configure_bugs(temp_p);
9655 9653
9654 /* Hold a pci device reference */
9655 pci_dev_get(temp_p->pdev);
9656
9656 if ( list_p == NULL ) 9657 if ( list_p == NULL )
9657 { 9658 {
9658 list_p = current_p = temp_p; 9659 list_p = current_p = temp_p;
@@ -10989,8 +10990,10 @@ aic7xxx_release(struct Scsi_Host *host)
10989 if(!p->pdev) 10990 if(!p->pdev)
10990 release_region(p->base, MAXREG - MINREG); 10991 release_region(p->base, MAXREG - MINREG);
10991#ifdef CONFIG_PCI 10992#ifdef CONFIG_PCI
10992 else 10993 else {
10993 pci_release_regions(p->pdev); 10994 pci_release_regions(p->pdev);
10995 pci_dev_put(p->pdev);
10996 }
10994#endif 10997#endif
10995 prev = NULL; 10998 prev = NULL;
10996 next = first_aic7xxx; 10999 next = first_aic7xxx;
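pci_find_device() handed out raw pointers; pci_get_device() returns the next
match with its reference count raised and drops the reference on the cursor
passed back in. A device kept beyond the scan loop therefore needs one extra
pci_dev_get(), paired with the pci_dev_put() added in aic7xxx_release()
above. The idiom in isolation (example_scan/example_keep are hypothetical):

	static void example_keep(struct pci_dev *pdev);

	/* Sketch: scan all matches, pinning each one we keep. */
	static void example_scan(unsigned int vendor, unsigned int device)
	{
		struct pci_dev *pdev = NULL;

		while ((pdev = pci_get_device(vendor, device, pdev))) {
			if (!pci_enable_device(pdev)) {
				pci_dev_get(pdev); /* balanced by pci_dev_put() */
				example_keep(pdev);
			}
		}
	}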
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
index 3bf334931a8a..b07e4f04fd00 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
@@ -29,7 +29,6 @@
29 * $Id: aic7xxx_proc.c,v 4.1 1997/06/97 08:23:42 deang Exp $ 29 * $Id: aic7xxx_proc.c,v 4.1 1997/06/97 08:23:42 deang Exp $
30 *-M*************************************************************************/ 30 *-M*************************************************************************/
31 31
32#include <linux/config.h>
33 32
34#define BLS (&aic7xxx_buffer[size]) 33#define BLS (&aic7xxx_buffer[size])
35#define HDRB \ 34#define HDRB \
diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig
new file mode 100644
index 000000000000..0ed391d8ee84
--- /dev/null
+++ b/drivers/scsi/aic94xx/Kconfig
@@ -0,0 +1,41 @@
1#
2# Kernel configuration file for aic94xx SAS/SATA driver.
3#
4# Copyright (c) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (c) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the aic94xx driver.
10#
11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as
13# published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The aic94xx driver is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with Aic94xx Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_AIC94XX
28 tristate "Adaptec AIC94xx SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 help
32	  This driver supports Adaptec's SAS/SATA 3Gb/s 64-bit PCI-X
33	  AIC94xx chip-based host adapters.

34
35config AIC94XX_DEBUG
36 bool "Compile in debug mode"
37 default y
38 depends on SCSI_AIC94XX
39 help
40 Compiles the aic94xx driver in debug mode. In debug mode,
41	  the driver prints additional diagnostic messages to the console.
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
new file mode 100644
index 000000000000..e6b70123940c
--- /dev/null
+++ b/drivers/scsi/aic94xx/Makefile
@@ -0,0 +1,39 @@
1#
2# Makefile for Adaptec aic94xx SAS/SATA driver.
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the aic94xx driver.
10#
11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as
13# published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The aic94xx driver is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with the aic94xx driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24
25ifeq ($(CONFIG_AIC94XX_DEBUG),y)
26 EXTRA_CFLAGS += -DASD_DEBUG -DASD_ENTER_EXIT
27endif
28
29obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o
30aic94xx-y += aic94xx_init.o \
31 aic94xx_hwi.o \
32 aic94xx_reg.o \
33 aic94xx_sds.o \
34 aic94xx_seq.o \
35 aic94xx_dump.o \
36 aic94xx_scb.o \
37 aic94xx_dev.o \
38 aic94xx_tmf.o \
39 aic94xx_task.o
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
new file mode 100644
index 000000000000..71a031df7a34
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -0,0 +1,114 @@
1/*
2 * Aic94xx SAS/SATA driver header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * $Id: //depot/aic94xx/aic94xx.h#31 $
26 */
27
28#ifndef _AIC94XX_H_
29#define _AIC94XX_H_
30
31#include <linux/slab.h>
32#include <linux/ctype.h>
33#include <scsi/libsas.h>
34
35#define ASD_DRIVER_NAME "aic94xx"
36#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver"
37
38#define asd_printk(fmt, ...) printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__)
39
40#ifdef ASD_ENTER_EXIT
41#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
42 __FUNCTION__)
43#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
44 __FUNCTION__)
45#else
46#define ENTER
47#define EXIT
48#endif
49
50#ifdef ASD_DEBUG
51#define ASD_DPRINTK asd_printk
52#else
53#define ASD_DPRINTK(fmt, ...)
54#endif
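ENTER/EXIT and ASD_DPRINTK compile to nothing unless their guards
(ASD_ENTER_EXIT and ASD_DEBUG, set by the Makefile when CONFIG_AIC94XX_DEBUG
is enabled) are defined, so instrumented builds get call tracing for free;
__FUNCTION__ is gcc's older spelling of C99's __func__. Usage sketch
(asd_example is illustrative):

	static int asd_example(void)
	{
		ENTER;			/* "aic94xx: ENTER asd_example"  */
		/* ... body ... */
		EXIT;			/* "aic94xx: --EXIT asd_example" */
		return 0;
	}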
55
56/* 2*ITNL timeout + 1 second */
57#define AIC94XX_SCB_TIMEOUT (5*HZ)
58
59extern kmem_cache_t *asd_dma_token_cache;
60extern kmem_cache_t *asd_ascb_cache;
61extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
62
63static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
64{
65 int i;
66 for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
67 snprintf(p, 3, "%02X", sas_addr[i]);
68 *p = '\0';
69}
70
71static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p)
72{
73 int i;
74 for (i = 0; i < SAS_ADDR_SIZE; i++) {
75 u8 h, l;
76 if (!*p)
77 break;
78 h = isdigit(*p) ? *p-'0' : *p-'A'+10;
79 p++;
80 l = isdigit(*p) ? *p-'0' : *p-'A'+10;
81 p++;
82 sas_addr[i] = (h<<4) | l;
83 }
84}
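The two helpers above round-trip a SAS address through its 16-character
upper-case hex form; note the parser only accepts digits and upper-case A-F,
which is exactly what the stringifier emits. Usage sketch (the address value
is illustrative):

	u8 addr[SAS_ADDR_SIZE] = { 0x50, 0x00, 0x0D, 0x1E,
				   0x00, 0x02, 0x30, 0x01 };
	char buf[2 * SAS_ADDR_SIZE + 1];

	asd_stringify_sas_addr(buf, addr);	/* buf = "50000D1E00023001" */
	asd_destringify_sas_addr(addr, buf);	/* inverse of the above     */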
85
86struct asd_ha_struct;
87struct asd_ascb;
88
89int asd_read_ocm(struct asd_ha_struct *asd_ha);
90int asd_read_flash(struct asd_ha_struct *asd_ha);
91
92int asd_dev_found(struct domain_device *dev);
93void asd_dev_gone(struct domain_device *dev);
94
95void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
96
97int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags);
98
99/* ---------- TMFs ---------- */
100int asd_abort_task(struct sas_task *);
101int asd_abort_task_set(struct domain_device *, u8 *lun);
102int asd_clear_aca(struct domain_device *, u8 *lun);
103int asd_clear_task_set(struct domain_device *, u8 *lun);
104int asd_lu_reset(struct domain_device *, u8 *lun);
105int asd_query_task(struct sas_task *);
106
107/* ---------- Adapter and Port management ---------- */
108int asd_clear_nexus_port(struct asd_sas_port *port);
109int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha);
110
111/* ---------- Phy Management ---------- */
112int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg);
113
114#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
new file mode 100644
index 000000000000..6f8901b748f7
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -0,0 +1,353 @@
1/*
2 * Aic94xx SAS/SATA DDB management
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * $Id: //depot/aic94xx/aic94xx_dev.c#21 $
26 */
27
28#include "aic94xx.h"
29#include "aic94xx_hwi.h"
30#include "aic94xx_reg.h"
31#include "aic94xx_sas.h"
32
33#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \
34 (_ha)->hw_prof.max_ddbs)
35#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
36#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
37
38static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
39{
40 unsigned long flags;
41 int ddb, i;
42
43 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
44 ddb = FIND_FREE_DDB(asd_ha);
45 if (ddb >= asd_ha->hw_prof.max_ddbs) {
46 ddb = -ENOMEM;
47 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
48 goto out;
49 }
50 SET_DDB(ddb, asd_ha);
51 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
52
53	for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i += 4)
54 asd_ddbsite_write_dword(asd_ha, ddb, i, 0);
55out:
56 return ddb;
57}
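asd_get_ddb() is a textbook bitmap ID allocator: find the first clear bit
and claim it, all under the DDB lock, then zero the hardware DDB site
outside the lock. The allocator core in isolation (names hypothetical):

	static int alloc_id(unsigned long *bitmap, int max, spinlock_t *lock)
	{
		unsigned long flags;
		int id;

		spin_lock_irqsave(lock, flags);
		id = find_first_zero_bit(bitmap, max);
		if (id >= max)
			id = -ENOMEM;		/* all IDs in use */
		else
			set_bit(id, bitmap);	/* claim it       */
		spin_unlock_irqrestore(lock, flags);
		return id;
	}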
58
59#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag)
60#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr)
61#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head)
62#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type)
63#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask)
64#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags)
65#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2)
66#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail)
67#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail)
68#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb)
69#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn)
70#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts)
71#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr)
72#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask)
73#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags)
74#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status)
75#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr)
76#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout)
77
78static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
79{
80 unsigned long flags;
81
82 if (!ddb || ddb >= 0xFFFF)
83 return;
84 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED);
85 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
86 CLEAR_DDB(ddb, asd_ha);
87 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
88}
89
90static inline void asd_set_ddb_type(struct domain_device *dev)
91{
92 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
93 int ddb = (int) (unsigned long) dev->lldd_dev;
94
95 if (dev->dev_type == SATA_PM_PORT)
96 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
97 else if (dev->tproto)
98 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
99 else
100 asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR);
101}
102
103static int asd_init_sata_tag_ddb(struct domain_device *dev)
104{
105 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
106 int ddb, i;
107
108 ddb = asd_get_ddb(asd_ha);
109 if (ddb < 0)
110 return ddb;
111
112 for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2)
113 asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
114
115 asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
116 SISTER_DDB, ddb);
117 return 0;
118}
119
120static inline int asd_init_sata(struct domain_device *dev)
121{
122 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
123 int ddb = (int) (unsigned long) dev->lldd_dev;
124 u32 qdepth = 0;
125 int res = 0;
126
127 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
128 if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
129 dev->sata_dev.identify_device &&
130 dev->sata_dev.identify_device[10] != 0) {
131 u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
132 u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
133
134 if (w76 & 0x100) /* NCQ? */
135 qdepth = (w75 & 0x1F) + 1;
136 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
137 (1<<qdepth)-1);
138 asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
139 }
140 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
141 dev->dev_type == SATA_PM_PORT) {
142 struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
143 dev->frame_rcvd;
144 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
145 }
146 asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
147 if (qdepth > 0)
148 res = asd_init_sata_tag_ddb(dev);
149 return res;
150}
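The NCQ probe above follows the ATA IDENTIFY DEVICE layout: word 76 bit 8
advertises NCQ support, and word 75 bits 4:0 encode the queue depth minus
one. Reduced to a sketch, where id is assumed to point at the 256-word
identify buffer; the mask expression here is an equivalent, shift-safe form
of the (1<<qdepth)-1 written to SATA_TAG_ALLOC_MASK above:

	u16 w75 = le16_to_cpu(id[75]);
	u16 w76 = le16_to_cpu(id[76]);
	u32 qdepth = (w76 & 0x100) ? (w75 & 0x1F) + 1 : 0;	/* 1..32 */
	/* Tag mask with qdepth low bits set; well-defined up to 32. */
	u32 tag_mask = qdepth ? 0xFFFFFFFFU >> (32 - qdepth) : 0;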
151
152static int asd_init_target_ddb(struct domain_device *dev)
153{
154 int ddb, i;
155 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
156 u8 flags = 0;
157
158 ddb = asd_get_ddb(asd_ha);
159 if (ddb < 0)
160 return ddb;
161
162 dev->lldd_dev = (void *) (unsigned long) ddb;
163
164 asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE);
165 asd_ddbsite_write_byte(asd_ha, ddb, 1, 0);
166 asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF);
167 for (i = 0; i < SAS_ADDR_SIZE; i++)
168 asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i,
169 dev->sas_addr[i]);
170 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF);
171 asd_set_ddb_type(dev);
172 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
173 if (dev->port->oob_mode != SATA_OOB_MODE) {
174 flags |= OPEN_REQUIRED;
175 if ((dev->dev_type == SATA_DEV) ||
176 (dev->tproto & SAS_PROTO_STP)) {
177 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
178 if (rps_resp->frame_type == SMP_RESPONSE &&
179 rps_resp->function == SMP_REPORT_PHY_SATA &&
180 rps_resp->result == SMP_RESP_FUNC_ACC) {
181 if (rps_resp->rps.affil_valid)
182 flags |= STP_AFFIL_POL;
183 if (rps_resp->rps.affil_supp)
184 flags |= SUPPORTS_AFFIL;
185 }
186 } else {
187 flags |= CONCURRENT_CONN_SUPP;
188 if (!dev->parent &&
189 (dev->dev_type == EDGE_DEV ||
190 dev->dev_type == FANOUT_DEV))
191 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
192 4);
193 else
194 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
195 dev->pathways);
196 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
197 }
198 }
199 if (dev->dev_type == SATA_PM)
200 flags |= SATA_MULTIPORT;
201 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
202
203 flags = 0;
204 if (dev->tproto & SAS_PROTO_STP)
205 flags |= STP_CL_POL_NO_TX;
206 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
207
208 asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF);
209 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
210 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
211
212 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) {
213 i = asd_init_sata(dev);
214 if (i < 0) {
215 asd_free_ddb(asd_ha, ddb);
216 return i;
217 }
218 }
219
220 if (dev->dev_type == SAS_END_DEV) {
221 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
222 if (rdev->I_T_nexus_loss_timeout > 0)
223 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
224 min(rdev->I_T_nexus_loss_timeout,
225 (u16)ITNL_TIMEOUT_CONST));
226 else
227 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
228 (u16)ITNL_TIMEOUT_CONST);
229 }
230 return 0;
231}
232
233static int asd_init_sata_pm_table_ddb(struct domain_device *dev)
234{
235 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
236 int ddb, i;
237
238 ddb = asd_get_ddb(asd_ha);
239 if (ddb < 0)
240 return ddb;
241
242 for (i = 0; i < 32; i += 2)
243 asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
244
245 asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
246 SISTER_DDB, ddb);
247
248 return 0;
249}
250
251#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags)
252#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb)
253
254/**
255 * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port
256 * @dev: pointer to domain device
257 *
258 * For SATA Port Multiplier Ports we need to allocate one SATA Port
259 * Multiplier Port DDB and depending on whether the target on it
260 * supports SATA II NCQ, one SATA Tag DDB.
261 */
262static int asd_init_sata_pm_port_ddb(struct domain_device *dev)
263{
264 int ddb, i, parent_ddb, pmtable_ddb;
265 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
266 u8 flags;
267
268 ddb = asd_get_ddb(asd_ha);
269 if (ddb < 0)
270 return ddb;
271
272 asd_set_ddb_type(dev);
273 flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET;
274 asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags);
275 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
276 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
277 asd_init_sata(dev);
278
279 parent_ddb = (int) (unsigned long) dev->parent->lldd_dev;
280 asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb);
281 pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB);
282 asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb);
283
284 if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) {
285 i = asd_init_sata_tag_ddb(dev);
286 if (i < 0) {
287 asd_free_ddb(asd_ha, ddb);
288 return i;
289 }
290 }
291 return 0;
292}
293
294static int asd_init_initiator_ddb(struct domain_device *dev)
295{
296 return -ENODEV;
297}
298
299/**
300 * asd_init_sata_pm_ddb -- SATA Port Multiplier
301 * @dev: pointer to domain device
302 *
303 * For STP and direct-attached SATA Port Multipliers we need
304 * one target port DDB entry and one SATA PM table DDB entry.
305 */
306static int asd_init_sata_pm_ddb(struct domain_device *dev)
307{
308 int res = 0;
309
310 res = asd_init_target_ddb(dev);
311 if (res)
312 goto out;
313 res = asd_init_sata_pm_table_ddb(dev);
314 if (res)
315 asd_free_ddb(dev->port->ha->lldd_ha,
316 (int) (unsigned long) dev->lldd_dev);
317out:
318 return res;
319}
320
321int asd_dev_found(struct domain_device *dev)
322{
323 int res = 0;
324
325 switch (dev->dev_type) {
326 case SATA_PM:
327 res = asd_init_sata_pm_ddb(dev);
328 break;
329 case SATA_PM_PORT:
330 res = asd_init_sata_pm_port_ddb(dev);
331 break;
332 default:
333 if (dev->tproto)
334 res = asd_init_target_ddb(dev);
335 else
336 res = asd_init_initiator_ddb(dev);
337 }
338 return res;
339}
340
341void asd_dev_gone(struct domain_device *dev)
342{
343 int ddb, sister_ddb;
344 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
345
346 ddb = (int) (unsigned long) dev->lldd_dev;
347 sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB);
348
349 if (sister_ddb != 0xFFFF)
350 asd_free_ddb(asd_ha, sister_ddb);
351 asd_free_ddb(asd_ha, ddb);
352 dev->lldd_dev = NULL;
353}
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
new file mode 100644
index 000000000000..e6ade5996d95
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -0,0 +1,959 @@
1/*
2 * Aic94xx SAS/SATA driver dump interface.
3 *
4 * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This file is part of the aic94xx driver.
11 *
12 * The aic94xx driver is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; version 2 of the
15 * License.
16 *
17 * The aic94xx driver is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with the aic94xx driver; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 *
26 * 2005/07/14/LT Complete overhaul of this file. Update pages, register
27 * locations, names, etc. Make use of macros. Print more information.
28 * Print all cseq and lseq mip and mdp.
29 *
30 */
31
32#include <linux/pci.h>
33#include "aic94xx.h"
34#include "aic94xx_reg.h"
35#include "aic94xx_reg_def.h"
36#include "aic94xx_sas.h"
37
38#include "aic94xx_dump.h"
39
40#ifdef ASD_DEBUG
41
42#define MD(x) (1 << (x))
43#define MODE_COMMON (1 << 31)
44#define MODE_0_7 (0xFF)
45
46static const struct lseq_cio_regs {
47 char *name;
48 u32 offs;
49 u8 width;
50 u32 mode;
51} LSEQmCIOREGS[] = {
52 {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
53 {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
54 {"LmREQMBX", 0x30, 32, MODE_COMMON },
55 {"LmRSPMBX", 0x34, 32, MODE_COMMON },
56 {"LmMnINT", 0x38, 32, MODE_0_7 },
57 {"LmMnINTEN", 0x3C, 32, MODE_0_7 },
58 {"LmXMTPRIMD", 0x40, 32, MODE_COMMON },
59 {"LmXMTPRIMCS", 0x44, 8, MODE_COMMON },
60 {"LmCONSTAT", 0x45, 8, MODE_COMMON },
61 {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) },
62 {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) },
63 {"LmMnEXPHDRP", 0x48, 8, MD(0) },
64 {"LmMnSASAALIGN", 0x48, 8, MD(1) },
65 {"LmMnMSKHDRP", 0x49, 8, MD(0) },
66 {"LmMnSTPALIGN", 0x49, 8, MD(1) },
67 {"LmMnRCVHDRP", 0x4A, 8, MD(0) },
68 {"LmMnXMTHDRP", 0x4A, 8, MD(1) },
69 {"LmALIGNMODE", 0x4B, 8, MD(1) },
70 {"LmMnEXPRCVCNT", 0x4C, 32, MD(0) },
71 {"LmMnXMTCNT", 0x4C, 32, MD(1) },
72 {"LmMnCURRTAG", 0x54, 16, MD(0) },
73 {"LmMnPREVTAG", 0x56, 16, MD(0) },
74 {"LmMnACKOFS", 0x58, 8, MD(1) },
75 {"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) },
76 {"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) },
77 {"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) },
78 {"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) },
79 {"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) },
80 {"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) },
81 {"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) },
82 {"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) },
83 {"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) },
84 {"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) },
85 {"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) },
86 {"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) },
87 {"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) },
88 {"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) },
89 {"LmMnXMTCRC", 0x74, 32, MD(1) },
90 {"LmCURRTAG", 0x74, 16, MD(0) },
91 {"LmPREVTAG", 0x76, 16, MD(0) },
92 {"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) },
93 {"LmDPTHSTAT", 0x7C, 8, MODE_COMMON },
94 {"LmMnHOLDLVL", 0x7D, 8, MD(0) },
95 {"LmMnSATAFS", 0x7E, 8, MD(1) },
96 {"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) },
97 {"LmPRMSTAT0", 0x80, 32, MODE_COMMON },
98 {"LmPRMSTAT1", 0x84, 32, MODE_COMMON },
99 {"LmGPRMINT", 0x88, 8, MODE_COMMON },
100 {"LmMnCURRSCB", 0x8A, 16, MD(0) },
101 {"LmPRMICODE", 0x8C, 32, MODE_COMMON },
102 {"LmMnRCVCNT", 0x90, 16, MD(0) },
103 {"LmMnBUFSTAT", 0x92, 16, MD(0) },
104 {"LmMnXMTHDRSIZE",0x92, 8, MD(1) },
105 {"LmMnXMTSIZE", 0x93, 8, MD(1) },
106 {"LmMnTGTXFRCNT", 0x94, 32, MD(0) },
107 {"LmMnEXPROFS", 0x98, 32, MD(0) },
108 {"LmMnXMTROFS", 0x98, 32, MD(1) },
109 {"LmMnRCVROFS", 0x9C, 32, MD(0) },
110 {"LmCONCTL", 0xA0, 16, MODE_COMMON },
111 {"LmBITLTIMER", 0xA2, 16, MODE_COMMON },
112 {"LmWWNLOW", 0xA8, 32, MODE_COMMON },
113 {"LmWWNHIGH", 0xAC, 32, MODE_COMMON },
114 {"LmMnFRMERR", 0xB0, 32, MD(0) },
115 {"LmMnFRMERREN", 0xB4, 32, MD(0) },
116 {"LmAWTIMER", 0xB8, 16, MODE_COMMON },
117 {"LmAWTCTL", 0xBA, 8, MODE_COMMON },
118 {"LmMnHDRCMPS", 0xC0, 32, MD(0) },
119 {"LmMnXMTSTAT", 0xC4, 8, MD(1) },
120 {"LmHWTSTATEN", 0xC5, 8, MODE_COMMON },
121 {"LmMnRRDYRC", 0xC6, 8, MD(0) },
122 {"LmMnRRDYTC", 0xC6, 8, MD(1) },
123 {"LmHWTSTAT", 0xC7, 8, MODE_COMMON },
124 {"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) },
125 {"LmDWSSTATUS", 0xCB, 8, MODE_COMMON },
126 {"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) },
127 {"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) },
128 {"LmXXXPRIM", 0xD4, 32, MODE_COMMON },
129 {"LmRCVASTAT", 0xD9, 8, MODE_COMMON },
130 {"LmINTDIS1", 0xDA, 8, MODE_COMMON },
131 {"LmPSTORESEL", 0xDB, 8, MODE_COMMON },
132 {"LmPSTORE", 0xDC, 32, MODE_COMMON },
133 {"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON },
134 {"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON },
135 {"LmDONETCTL", 0xF2, 16, MODE_COMMON },
136 {NULL, 0, 0, 0 }
137};
138/*
139static struct lseq_cio_regs LSEQmOOBREGS[] = {
140 {"OOB_BFLTR" ,0x100, 8, MD(5)},
141 {"OOB_INIT_MIN" ,0x102,16, MD(5)},
142 {"OOB_INIT_MAX" ,0x104,16, MD(5)},
143 {"OOB_INIT_NEG" ,0x106,16, MD(5)},
144 {"OOB_SAS_MIN" ,0x108,16, MD(5)},
145 {"OOB_SAS_MAX" ,0x10A,16, MD(5)},
146 {"OOB_SAS_NEG" ,0x10C,16, MD(5)},
147 {"OOB_WAKE_MIN" ,0x10E,16, MD(5)},
148 {"OOB_WAKE_MAX" ,0x110,16, MD(5)},
149 {"OOB_WAKE_NEG" ,0x112,16, MD(5)},
150 {"OOB_IDLE_MAX" ,0x114,16, MD(5)},
151 {"OOB_BURST_MAX" ,0x116,16, MD(5)},
152 {"OOB_XMIT_BURST" ,0x118, 8, MD(5)},
153 {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)},
154 {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)},
155 {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)},
156 {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)},
157 {"OOB_SAS_NEGO" ,0x120, 8, MD(5)},
158 {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)},
159 {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)},
160 {"OOB_DATA_KBITS" ,0x126, 8, MD(5)},
161 {"OOB_BURST_DATA" ,0x128,32, MD(5)},
162 {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)},
163 {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)},
164 {"OOB_SYNC_DATA" ,0x134,32, MD(5)},
165 {"OOB_D10_2_DATA" ,0x138,32, MD(5)},
166 {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)},
167 {"OOB_SIG_GEN" ,0x140, 8, MD(5)},
168 {"OOB_XMIT" ,0x141, 8, MD(5)},
169 {"FUNCTION_MAKS" ,0x142, 8, MD(5)},
170 {"OOB_MODE" ,0x143, 8, MD(5)},
171 {"CURRENT_STATUS" ,0x144, 8, MD(5)},
172 {"SPEED_MASK" ,0x145, 8, MD(5)},
173 {"PRIM_COUNT" ,0x146, 8, MD(5)},
174 {"OOB_SIGNALS" ,0x148, 8, MD(5)},
175 {"OOB_DATA_DET" ,0x149, 8, MD(5)},
176 {"OOB_TIME_OUT" ,0x14C, 8, MD(5)},
177 {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)},
178 {"OOB_STATUS" ,0x14E, 8, MD(5)},
179 {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)},
180 {"RCD_DELAY" ,0x151, 8, MD(5)},
181 {"COMSAS_TIMER" ,0x152, 8, MD(5)},
182 {"SNTT_DELAY" ,0x153, 8, MD(5)},
183 {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)},
184 {"SNLT_DELAY" ,0x155, 8, MD(5)},
185 {"SNWT_DELAY" ,0x156, 8, MD(5)},
186 {"ALIGN_DELAY" ,0x157, 8, MD(5)},
187 {"INT_ENABLE_0" ,0x158, 8, MD(5)},
188 {"INT_ENABLE_1" ,0x159, 8, MD(5)},
189 {"INT_ENABLE_2" ,0x15A, 8, MD(5)},
190 {"INT_ENABLE_3" ,0x15B, 8, MD(5)},
191 {"OOB_TEST_REG" ,0x15C, 8, MD(5)},
192 {"PHY_CONTROL_0" ,0x160, 8, MD(5)},
193 {"PHY_CONTROL_1" ,0x161, 8, MD(5)},
194 {"PHY_CONTROL_2" ,0x162, 8, MD(5)},
195 {"PHY_CONTROL_3" ,0x163, 8, MD(5)},
196 {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)},
197 {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)},
198 {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)},
199 {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)},
200 {"PHY_CONTROL_4" ,0x168, 8, MD(5)},
201 {"PHY_TEST" ,0x169, 8, MD(5)},
202 {"PHY_PWR_CTL" ,0x16A, 8, MD(5)},
203 {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)},
204 {"OOB_SM_CON" ,0x16C, 8, MD(5)},
205 {"ADDR_TRAP_1" ,0x16D, 8, MD(5)},
206 {"ADDR_NEXT_1" ,0x16E, 8, MD(5)},
207 {"NEXT_ST_1" ,0x16F, 8, MD(5)},
208 {"OOB_SM_STATE" ,0x170, 8, MD(5)},
209 {"ADDR_TRAP_2" ,0x171, 8, MD(5)},
210 {"ADDR_NEXT_2" ,0x172, 8, MD(5)},
211 {"NEXT_ST_2" ,0x173, 8, MD(5)},
212 {NULL, 0, 0, 0 }
213};
214*/
215#define STR_8BIT " %30s[0x%04x]:0x%02x\n"
216#define STR_16BIT " %30s[0x%04x]:0x%04x\n"
217#define STR_32BIT " %30s[0x%04x]:0x%08x\n"
218#define STR_64BIT " %30s[0x%04x]:0x%llx\n"
219
220#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \
221 asd_read_reg_byte(_ha, _r))
222#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \
223 asd_read_reg_word(_ha, _r))
224#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \
225 asd_read_reg_dword(_ha, _r))
226
227#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \
228 asd_read_reg_byte(_ha, C##_n))
229#define PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \
230 asd_read_reg_word(_ha, C##_n))
231#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \
232 asd_read_reg_dword(_ha, C##_n))
233
234#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n"
235#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n"
236#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n"
237
238#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \
239 asd_read_reg_byte(_ha, _r))
240#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \
241 asd_read_reg_word(_ha, _r))
242#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \
243 asd_read_reg_dword(_ha, _r))
244
245/* can also be used for MD when the register is mode aware already */
246#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\
247 asd_read_reg_byte(_ha, CSEQ_##_n))
248#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
249 asd_read_reg_word(_ha, CSEQ_##_n))
250#define PRINT_MIS_dword(_ha, _n) \
251 asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
252 asd_read_reg_dword(_ha, CSEQ_##_n))
253#define PRINT_MIS_qword(_ha, _n) \
254 asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR, \
255 (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \
256 | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32)))
257
258#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n)
259#define PRINT_CMDP_word(_ha, _n) \
260asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
261 #_n, \
262 asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \
263 asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \
264 asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \
265 asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \
266 asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \
267 asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \
268 asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \
269 asd_read_reg_word(_ha, CMDP_REG(_n, 7)))
270
271#define PRINT_CMDP_byte(_ha, _n) \
272asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
273 #_n, \
274 asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \
275 asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \
276 asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \
277 asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \
278 asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \
279 asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \
280 asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \
281 asd_read_reg_byte(_ha, CMDP_REG(_n, 7)))
282
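All of the PRINT_* macros above lean on two preprocessor features: #_n
stringifies the register name for the log line, and CSEQ_##_n token-pastes
it into the register-offset namespace, so each register is named exactly
once. Reduced to its essential shape (SHOW is hypothetical; it mirrors
PRINT_MIS_word):

	#define SHOW(_ha, _n) \
		asd_printk(STR_16BIT, #_n, CSEQ_##_n - CMAPPEDSCR, \
			   asd_read_reg_word(_ha, CSEQ_##_n))

	/* SHOW(asd_ha, Q_EXE_HEAD) expands to a line printing
	 * "Q_EXE_HEAD", its scratch offset, and its current value. */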
283static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha)
284{
285 int mode;
286
287 asd_printk("CSEQ STATE\n");
288
289 asd_printk("ARP2 REGISTERS\n");
290
291 PRINT_CREG_32bit(asd_ha, ARP2CTL);
292 PRINT_CREG_32bit(asd_ha, ARP2INT);
293 PRINT_CREG_32bit(asd_ha, ARP2INTEN);
294 PRINT_CREG_8bit(asd_ha, MODEPTR);
295 PRINT_CREG_8bit(asd_ha, ALTMODE);
296 PRINT_CREG_8bit(asd_ha, FLAG);
297 PRINT_CREG_8bit(asd_ha, ARP2INTCTL);
298 PRINT_CREG_16bit(asd_ha, STACK);
299 PRINT_CREG_16bit(asd_ha, PRGMCNT);
300 PRINT_CREG_16bit(asd_ha, ACCUM);
301 PRINT_CREG_16bit(asd_ha, SINDEX);
302 PRINT_CREG_16bit(asd_ha, DINDEX);
303 PRINT_CREG_8bit(asd_ha, SINDIR);
304 PRINT_CREG_8bit(asd_ha, DINDIR);
305 PRINT_CREG_8bit(asd_ha, JUMLDIR);
306 PRINT_CREG_8bit(asd_ha, ARP2HALTCODE);
307 PRINT_CREG_16bit(asd_ha, CURRADDR);
308 PRINT_CREG_16bit(asd_ha, LASTADDR);
309 PRINT_CREG_16bit(asd_ha, NXTLADDR);
310
311 asd_printk("IOP REGISTERS\n");
312
313 PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL);
314 PRINT_CREG_32bit(asd_ha, MAPPEDSCR);
315
316 asd_printk("CIO REGISTERS\n");
317
318 for (mode = 0; mode < 9; mode++)
319 PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode));
320 PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15));
321
322 for (mode = 0; mode < 9; mode++)
323 PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode));
324 PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15));
325
326 for (mode = 0; mode < 8; mode++)
327 PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode));
328 for (mode = 0; mode < 8; mode++)
329 PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode));
330 for (mode = 0; mode < 8; mode++)
331 PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode));
332 for (mode = 0; mode < 8; mode++)
333 PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode));
334
335 PRINT_CREG_8bit(asd_ha, SCRATCHPAGE);
336 for (mode = 0; mode < 8; mode++)
337 PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE,
338 CMnSCRATCHPAGE(mode));
339
340 PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON);
341 PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK);
342 PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST);
343 PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE);
344 PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC);
345 PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS);
346
347 asd_printk("SCRATCH MEMORY\n");
348
349 asd_printk("MIP 4 >>>>>\n");
350 PRINT_MIS_word(asd_ha, Q_EXE_HEAD);
351 PRINT_MIS_word(asd_ha, Q_EXE_TAIL);
352 PRINT_MIS_word(asd_ha, Q_DONE_HEAD);
353 PRINT_MIS_word(asd_ha, Q_DONE_TAIL);
354 PRINT_MIS_word(asd_ha, Q_SEND_HEAD);
355 PRINT_MIS_word(asd_ha, Q_SEND_TAIL);
356 PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD);
357 PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL);
358 PRINT_MIS_word(asd_ha, Q_COPY_HEAD);
359 PRINT_MIS_word(asd_ha, Q_COPY_TAIL);
360 PRINT_MIS_word(asd_ha, REG0);
361 PRINT_MIS_word(asd_ha, REG1);
362 PRINT_MIS_dword(asd_ha, REG2);
363 PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP);
364 PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE);
365 PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT);
366
367 asd_printk("MIP 5 >>>>\n");
368 PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE);
369 PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT);
370 PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD);
371 PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL);
372 PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB);
373 PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD);
374 PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL);
375 PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET);
376
377 asd_printk("MIP 6 >>>>\n");
378 PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0);
379 PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1);
380 PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR);
381 PRINT_MIS_byte(asd_ha, INT_ROUT_MODE);
382 PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS);
383 PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX);
384 PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX);
385 PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD);
386 PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL);
387 PRINT_MIS_byte(asd_ha, FREE_SCB_MASK);
388 PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD);
389 PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL);
390 PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD);
391 PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL);
392
393 asd_printk("MIP 7 >>>>\n");
394 PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE);
395 PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT);
396 PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD);
397 PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL);
398 PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB);
399 PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD);
400 PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL);
401 PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET);
402 PRINT_MIS_word(asd_ha, PRIMITIVE_DATA);
403 PRINT_MIS_dword(asd_ha, TIMEOUT_CONST);
404
405 asd_printk("MDP 0 >>>>\n");
406 asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n",
407 "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7");
408 PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX);
409 PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR);
410 PRINT_CMDP_word(asd_ha, Q_LINK_HEAD);
411 PRINT_CMDP_word(asd_ha, Q_LINK_TAIL);
412 PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE);
413
414 asd_printk("MDP 0 Mode 8 >>>>\n");
415 PRINT_MIS_word(asd_ha, RET_ADDR);
416 PRINT_MIS_word(asd_ha, RET_SCBPTR);
417 PRINT_MIS_word(asd_ha, SAVE_SCBPTR);
418 PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX);
419 PRINT_MIS_word(asd_ha, RESP_LEN);
420 PRINT_MIS_word(asd_ha, TMF_SCBPTR);
421 PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB);
422 PRINT_MIS_word(asd_ha, GLOBAL_HEAD);
423 PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD);
424 PRINT_MIS_byte(asd_ha, TMF_OPCODE);
425 PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS);
426 PRINT_MIS_word(asd_ha, HSB_SITE);
427 PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE);
428 PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE);
429
430 asd_printk("MDP 1 Mode 8 >>>>\n");
431 PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR);
432 PRINT_MIS_qword(asd_ha, LUN_TO_CHECK);
433
434 asd_printk("MDP 2 Mode 8 >>>>\n");
435 PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER);
436 PRINT_MIS_qword(asd_ha, HQ_DONE_BASE);
437 PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER);
438 PRINT_MIS_byte(asd_ha, HQ_DONE_PASS);
439}
440
441#define PRINT_LREG_8bit(_h, _lseq, _n) \
442 asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq)))
443#define PRINT_LREG_16bit(_h, _lseq, _n) \
444 asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq)))
445#define PRINT_LREG_32bit(_h, _lseq, _n) \
446 asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq)))
447
448#define PRINT_LMIP_byte(_h, _lseq, _n) \
449 asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
450 asd_read_reg_byte(_h, LmSEQ_##_n(_lseq)))
451#define PRINT_LMIP_word(_h, _lseq, _n) \
452 asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
453 asd_read_reg_word(_h, LmSEQ_##_n(_lseq)))
454#define PRINT_LMIP_dword(_h, _lseq, _n) \
455 asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
456 asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)))
457#define PRINT_LMIP_qword(_h, _lseq, _n) \
458 asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
459 (unsigned long long)(((unsigned long long) \
460 asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \
461 | (((unsigned long long) \
462 asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32)))
463
464static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha,
465 u32 lseq_cio_addr, int i)
466{
467 switch (LSEQmCIOREGS[i].width) {
468 case 8:
469 asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name,
470 LSEQmCIOREGS[i].offs,
471 asd_read_reg_byte(asd_ha, lseq_cio_addr +
472 LSEQmCIOREGS[i].offs));
473
474 break;
475 case 16:
476 asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name,
477 LSEQmCIOREGS[i].offs,
478 asd_read_reg_word(asd_ha, lseq_cio_addr +
479 LSEQmCIOREGS[i].offs));
480
481 break;
482 case 32:
483 asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name,
484 LSEQmCIOREGS[i].offs,
485 asd_read_reg_dword(asd_ha, lseq_cio_addr +
486 LSEQmCIOREGS[i].offs));
487 break;
488 }
489}
490
491static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
492{
493 u32 moffs;
494 int mode;
495
496 asd_printk("LSEQ %d STATE\n", lseq);
497
498 asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq);
499 PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL);
500 PRINT_LREG_32bit(asd_ha, lseq, ARP2INT);
501 PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN);
502 PRINT_LREG_8bit(asd_ha, lseq, MODEPTR);
503 PRINT_LREG_8bit(asd_ha, lseq, ALTMODE);
504 PRINT_LREG_8bit(asd_ha, lseq, FLAG);
505 PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL);
506 PRINT_LREG_16bit(asd_ha, lseq, STACK);
507 PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT);
508 PRINT_LREG_16bit(asd_ha, lseq, ACCUM);
509 PRINT_LREG_16bit(asd_ha, lseq, SINDEX);
510 PRINT_LREG_16bit(asd_ha, lseq, DINDEX);
511 PRINT_LREG_8bit(asd_ha, lseq, SINDIR);
512 PRINT_LREG_8bit(asd_ha, lseq, DINDIR);
513 PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR);
514 PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE);
515 PRINT_LREG_16bit(asd_ha, lseq, CURRADDR);
516 PRINT_LREG_16bit(asd_ha, lseq, LASTADDR);
517 PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR);
518
519 asd_printk("LSEQ%d: IOP REGISTERS\n", lseq);
520
521 PRINT_LREG_32bit(asd_ha, lseq, MODECTL);
522 PRINT_LREG_32bit(asd_ha, lseq, DBGMODE);
523 PRINT_LREG_32bit(asd_ha, lseq, CONTROL);
524 PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq));
525 PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq));
526
527 asd_printk("LSEQ%d: CIO REGISTERS\n", lseq);
528 asd_printk("Mode common:\n");
529
530 for (mode = 0; mode < 8; mode++) {
531 u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
532 int i;
533
534 for (i = 0; LSEQmCIOREGS[i].name; i++)
535 if (LSEQmCIOREGS[i].mode == MODE_COMMON)
536 asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i);
537 }
538
539 asd_printk("Mode unique:\n");
540 for (mode = 0; mode < 8; mode++) {
541 u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
542 int i;
543
544 asd_printk("Mode %d\n", mode);
545 for (i = 0; LSEQmCIOREGS[i].name; i++) {
546 if (!(LSEQmCIOREGS[i].mode & (1 << mode)))
547 continue;
548 asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i);
549 }
550 }
551
552 asd_printk("SCRATCH MEMORY\n");
553
554 asd_printk("LSEQ%d MIP 0 >>>>\n", lseq);
555 PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD);
556 PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL);
557 PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER);
558 PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS);
559 PRINT_LMIP_qword(asd_ha, lseq, CONNECTION_STATE);
560 PRINT_LMIP_word(asd_ha, lseq, CONCTL);
561 PRINT_LMIP_byte(asd_ha, lseq, CONSTAT);
562 PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES);
563 PRINT_LMIP_word(asd_ha, lseq, REG1_ISR);
564 PRINT_LMIP_word(asd_ha, lseq, REG2_ISR);
565 PRINT_LMIP_word(asd_ha, lseq, REG3_ISR);
566 PRINT_LMIP_qword(asd_ha, lseq, REG0_ISR);
567
568 asd_printk("LSEQ%d MIP 1 >>>>\n", lseq);
569 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0);
570 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1);
571 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2);
572 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3);
573 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0);
574 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1);
575 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2);
576 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3);
577 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD);
578 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL);
579 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL);
580 PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST);
581 PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX);
582 PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX);
583
584 asd_printk("LSEQ%d MIP 2 >>>>\n", lseq);
585 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0);
586 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1);
587 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2);
588 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3);
589 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0);
590 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1);
591 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2);
592 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3);
593 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD);
594 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL);
595 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL);
596
597 asd_printk("LSEQ%d MIP 3 >>>>\n", lseq);
598 PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST);
599 PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT);
600 PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT);
601 PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT);
602 PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT);
603 PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT);
604 PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT);
605
606 for (mode = 0; mode < 3; mode++) {
607 asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", lseq, mode);
608 moffs = mode * LSEQ_MODE_SCRATCH_SIZE;
609
610 asd_printk(STR_16BIT, "RET_ADDR", 0,
611 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)
612 + moffs));
613 asd_printk(STR_16BIT, "REG0_MODE", 2,
614 asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)
615 + moffs));
616 asd_printk(STR_16BIT, "MODE_FLAGS", 4,
617 asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)
618 + moffs));
619 asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
620 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)
621 + moffs));
622 asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
623 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)
624 + moffs));
625 asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
626 asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)
627 + moffs));
628 asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
629 asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)
630 + moffs));
631 }
632
633 asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq);
634 moffs = LSEQ_MODE5_PAGE0_OFFSET;
635 asd_printk(STR_16BIT, "RET_ADDR", 0,
636 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs));
637 asd_printk(STR_16BIT, "REG0_MODE", 2,
638 asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs));
639 asd_printk(STR_16BIT, "MODE_FLAGS", 4,
640 asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs));
641 asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
642 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs));
643 asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
644 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs));
645 asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
646 asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs));
647 asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
648 asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs));
649
650 asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq);
651 PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE);
652 PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX);
653 PRINT_LMIP_word(asd_ha, lseq, RESP_LEN);
654 PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE);
655 PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE);
656 PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN);
657 PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL);
658 PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS);
659 PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE);
660 PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR);
661
662 asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq);
663 PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD);
664 PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX);
665 PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG);
666 PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS);
667 PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE);
668 PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS);
669 PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE);
670 PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR);
671
672 asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq);
673 PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER);
674 PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR);
675 PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE);
676 PRINT_LMIP_word(asd_ha, lseq, IP_BITL);
677 PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG);
678 PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH);
679
680 asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq);
681 PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS);
682 PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE);
683 PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD);
684 PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR);
685 PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS);
686 PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE);
687 PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT);
688 PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES);
689 PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES);
690 PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_TIMEOUT);
691 PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_DOWN_COUNT);
692
693 asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq);
694 PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0);
695 PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1);
696
697 asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq);
698 PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0);
699 PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1);
700
701 asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq);
702 PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT);
703 PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT);
704 PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT);
705
706 asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq);
707 PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK);
708 PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT);
709 PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK);
710 PRINT_LMIP_word(asd_ha, lseq, TAG_MASK);
711 PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG);
712 PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET);
713
714 asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq);
715 PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS);
716 PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS);
717 PRINT_LMIP_word(asd_ha, lseq, SDB_DDB);
718 PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS);
719 PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG);
720
721 asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq);
722 PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME);
723 PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS);
724 PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS);
725 PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL);
726
727 asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq);
728 PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS);
729 PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS);
730 PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS);
731 PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS);
732 PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS);
733
734 asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq);
735 PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS);
736 PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS);
737 PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS);
738 PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
739}
740
741/**
742 * asd_dump_target_ddb -- dump a CSEQ DDB site
743 * @asd_ha: pointer to host adapter structure
744 * @site_no: site number of interest
745 */
746void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
747{
748 if (site_no >= asd_ha->hw_prof.max_ddbs)
749 return;
750
751#define DDB_FIELDB(__name) \
752 asd_ddbsite_read_byte(asd_ha, site_no, \
753 offsetof(struct asd_ddb_ssp_smp_target_port, __name))
754#define DDB2_FIELDB(__name) \
755 asd_ddbsite_read_byte(asd_ha, site_no, \
756 offsetof(struct asd_ddb_stp_sata_target_port, __name))
757#define DDB_FIELDW(__name) \
758 asd_ddbsite_read_word(asd_ha, site_no, \
759 offsetof(struct asd_ddb_ssp_smp_target_port, __name))
760
761#define DDB_FIELDD(__name) \
762 asd_ddbsite_read_dword(asd_ha, site_no, \
763 offsetof(struct asd_ddb_ssp_smp_target_port, __name))
764
765 asd_printk("DDB: 0x%02x\n", site_no);
766 asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
767 asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
768 asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
769 asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
770 asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
771 asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
772 asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
773 asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
774 asd_printk("Pathway Blocked Count: 0x%02x\n",
775 DDB_FIELDB(pathway_blocked_count));
776 asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
777 asd_printk("more_compat_features: 0x%08x\n",
778 DDB_FIELDD(more_compat_features));
779 asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
780 asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
781 asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
782 asd_printk("ExecQ Tail: 0x%04x\n", DDB_FIELDW(exec_queue_tail));
783 asd_printk("SendQ Tail: 0x%04x\n", DDB_FIELDW(send_queue_tail));
784 asd_printk("Active Task Count: 0x%04x\n",
785 DDB_FIELDW(active_task_count));
786 asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
787 asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
788 asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
789}
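/*
 * Editorial sketch (not part of the driver): the DDB_FIELD* macros above
 * address a member of an on-chip DDB site by its byte offset, computed with
 * offsetof() against the structure describing the site layout.
 * site_read_byte() and struct example_ddb are hypothetical stand-ins.
 */
#include <stddef.h>

struct example_ddb {
	unsigned char conn_type;
	unsigned char conn_rate;
};

static unsigned char read_conn_rate(unsigned char (*site_read_byte)(int site,
								    size_t offs),
				    int site_no)
{
	/* read the byte at the member's offset within the site */
	return site_read_byte(site_no, offsetof(struct example_ddb, conn_rate));
}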
790
791void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
792{
793#define DDB0_FIELDB(__name) \
794 asd_ddbsite_read_byte(asd_ha, 0, \
795 offsetof(struct asd_ddb_seq_shared, __name))
796#define DDB0_FIELDW(__name) \
797 asd_ddbsite_read_word(asd_ha, 0, \
798 offsetof(struct asd_ddb_seq_shared, __name))
799
800#define DDB0_FIELDD(__name) \
801 asd_ddbsite_read_dword(asd_ha, 0, \
802 offsetof(struct asd_ddb_seq_shared, __name))
803
804#define DDB0_FIELDA(__name, _o) \
805 asd_ddbsite_read_byte(asd_ha, 0, \
806 offsetof(struct asd_ddb_seq_shared, __name)+_o)
807
808
809 asd_printk("DDB: 0\n");
810 asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
811 asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
812 asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
813 asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
814 asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
815 asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
816 asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
817 asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
818 asd_printk("est_nexus_buf_thresh:%04x\n",
819 DDB0_FIELDW(est_nexus_buf_thresh));
820 asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
821 asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
822 asd_printk("port_map_by_links:%02x %02x %02x %02x "
823 "%02x %02x %02x %02x\n",
824 DDB0_FIELDA(port_map_by_links, 0),
825 DDB0_FIELDA(port_map_by_links, 1),
826 DDB0_FIELDA(port_map_by_links, 2),
827 DDB0_FIELDA(port_map_by_links, 3),
828 DDB0_FIELDA(port_map_by_links, 4),
829 DDB0_FIELDA(port_map_by_links, 5),
830 DDB0_FIELDA(port_map_by_links, 6),
831 DDB0_FIELDA(port_map_by_links, 7));
832}
833
834static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
835{
836
837#define SCB_FIELDB(__name) \
838 asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
839 + offsetof(struct initiate_ssp_task, __name))
840#define SCB_FIELDW(__name) \
841 asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
842 + offsetof(struct initiate_ssp_task, __name))
843#define SCB_FIELDD(__name) \
844 asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
845 + offsetof(struct initiate_ssp_task, __name))
846
847 asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
848 asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
849 asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
850 asd_printk("Target Port Xfer Tag: 0x%04x.\n",
851 SCB_FIELDW(ssp_frame.tptt));
852 asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDD(ssp_frame.data_offs));
853 asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
854}
855
856/**
857 * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
858 * @asd_ha: pointer to host adapter struct
859 */
860void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
861{
862 u16 site_no;
863
864 for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
865 u8 opcode;
866
867 if (!SCB_SITE_VALID(site_no))
868 continue;
869
870 /* We are only interested in SCB sites currently used.
871 */
872 opcode = asd_scbsite_read_byte(asd_ha, site_no,
873 offsetof(struct scb_header,
874 opcode));
875 if (opcode == 0xFF)
876 continue;
877
878 asd_printk("\nSCB: 0x%x\n", site_no);
879 asd_dump_scb_site(asd_ha, site_no);
880 }
881}
882
883/**
884 * asd_dump_seq_state -- dump CSEQ and LSEQ states
885 * @asd_ha: pointer to host adapter structure
886 * @lseq_mask: mask of LSEQs of interest
887 */
888void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask)
889{
890 int lseq;
891
892 asd_dump_cseq_state(asd_ha);
893
894 if (lseq_mask != 0)
895 for_each_sequencer(lseq_mask, lseq_mask, lseq)
896 asd_dump_lseq_state(asd_ha, lseq);
897}
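/*
 * Editorial sketch (not part of the driver): for_each_sequencer() above
 * visits every LSEQ whose bit is set in lseq_mask.  An equivalent open-coded
 * walk over the set bits of an 8-bit mask looks like this; dump_one() is a
 * hypothetical callback.
 */
static void walk_lseq_mask(unsigned char mask, void (*dump_one)(int lseq))
{
	int lseq;

	for (lseq = 0; mask; mask >>= 1, lseq++)
		if (mask & 1)
			dump_one(lseq);	/* e.g. asd_dump_lseq_state() */
}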
898
899void asd_dump_frame_rcvd(struct asd_phy *phy,
900 struct done_list_struct *dl)
901{
902 unsigned long flags;
903 int i;
904
905 switch ((dl->status_block[1] & 0x70) >> 3) {
906 case SAS_PROTO_STP:
907 ASD_DPRINTK("STP proto device-to-host FIS:\n");
908 break;
909 default:
910 case SAS_PROTO_SSP:
911 ASD_DPRINTK("SAS proto IDENTIFY:\n");
912 break;
913 }
914 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
915 for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4)
916 ASD_DPRINTK("%02x: %02x %02x %02x %02x\n",
917 i,
918 phy->frame_rcvd[i],
919 phy->frame_rcvd[i+1],
920 phy->frame_rcvd[i+2],
921 phy->frame_rcvd[i+3]);
922 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
923}
924
925static inline void asd_dump_scb(struct asd_ascb *ascb, int ind)
926{
927 asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
928 "index:%d, opcode:0x%02x\n",
929 ind, ascb->dma_scb.vaddr,
930 (unsigned long long)ascb->dma_scb.dma_handle,
931 (unsigned long long)
932 le64_to_cpu(ascb->scb->header.next_scb),
933 le16_to_cpu(ascb->scb->header.index),
934 ascb->scb->header.opcode);
935}
936
937void asd_dump_scb_list(struct asd_ascb *ascb, int num)
938{
939 int i = 0;
940
941 asd_printk("dumping %d scbs:\n", num);
942
943 asd_dump_scb(ascb, i++);
944 --num;
945
946 if (num > 0 && !list_empty(&ascb->list)) {
947 struct list_head *el;
948
949 list_for_each(el, &ascb->list) {
950 struct asd_ascb *s = list_entry(el, struct asd_ascb,
951 list);
952 asd_dump_scb(s, i++);
953 if (--num <= 0)
954 break;
955 }
956 }
957}
958
959#endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h
new file mode 100644
index 000000000000..0c388e7da6bb
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.h
@@ -0,0 +1,52 @@
1/*
2 * Aic94xx SAS/SATA driver dump header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_DUMP_H_
28#define _AIC94XX_DUMP_H_
29
30#ifdef ASD_DEBUG
31
32void asd_dump_ddb_0(struct asd_ha_struct *asd_ha);
33void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no);
34void asd_dump_scb_sites(struct asd_ha_struct *asd_ha);
35void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask);
36void asd_dump_frame_rcvd(struct asd_phy *phy,
37 struct done_list_struct *dl);
38void asd_dump_scb_list(struct asd_ascb *ascb, int num);
39#else /* ASD_DEBUG */
40
41static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { }
42static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha,
43 u16 site_no) { }
44static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { }
45static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha,
46 u8 lseq_mask) { }
47static inline void asd_dump_frame_rcvd(struct asd_phy *phy,
48 struct done_list_struct *dl) { }
49static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { }
50#endif /* ASD_DEBUG */
51
52#endif /* _AIC94XX_DUMP_H_ */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
new file mode 100644
index 000000000000..1d8c5e5f442e
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -0,0 +1,1376 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28#include <linux/delay.h>
29#include <linux/module.h>
30
31#include "aic94xx.h"
32#include "aic94xx_reg.h"
33#include "aic94xx_hwi.h"
34#include "aic94xx_seq.h"
35#include "aic94xx_dump.h"
36
37u32 MBAR0_SWB_SIZE;
38
39/* ---------- Initialization ---------- */
40
41static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
42{
43 extern char sas_addr_str[];
44 /* If the user has specified a WWN, it overrides other settings
45 */
46 if (sas_addr_str[0] != '\0')
47 asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr,
48 sas_addr_str);
49 else if (asd_ha->hw_prof.sas_addr[0] != 0)
50 asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr);
51}
52
53static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
54{
55 int i;
56
57 for (i = 0; i < ASD_MAX_PHYS; i++) {
58 if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
59 continue;
60 /* Overwrite a phy's address only if it already has one,
61 * i.e. only for the enabled phys. */
62 ASD_DPRINTK("setting phy%d addr to %llx\n", i,
63 SAS_ADDR(asd_ha->hw_prof.sas_addr));
64 memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
65 asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
66 }
67}
68
69/* ---------- PHY initialization ---------- */
70
71static void asd_init_phy_identify(struct asd_phy *phy)
72{
73 phy->identify_frame = phy->id_frm_tok->vaddr;
74
75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
76
77 phy->identify_frame->dev_type = SAS_END_DEV;
78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
80 if (phy->sas_phy.role & PHY_ROLE_TARGET)
81 phy->identify_frame->target_bits = phy->sas_phy.tproto;
82 memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
83 SAS_ADDR_SIZE);
84 phy->identify_frame->phy_id = phy->sas_phy.id;
85}
86
87static int asd_init_phy(struct asd_phy *phy)
88{
89 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
90 struct asd_sas_phy *sas_phy = &phy->sas_phy;
91
92 sas_phy->enabled = 1;
93 sas_phy->class = SAS;
94 sas_phy->iproto = SAS_PROTO_ALL;
95 sas_phy->tproto = 0;
96 sas_phy->type = PHY_TYPE_PHYSICAL;
97 sas_phy->role = PHY_ROLE_INITIATOR;
98 sas_phy->oob_mode = OOB_NOT_CONNECTED;
99 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
100
101 phy->id_frm_tok = asd_alloc_coherent(asd_ha,
102 sizeof(*phy->identify_frame),
103 GFP_KERNEL);
104 if (!phy->id_frm_tok) {
105 asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
106 return -ENOMEM;
107 } else
108 asd_init_phy_identify(phy);
109
110 memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
111
112 return 0;
113}
114
115static int asd_init_phys(struct asd_ha_struct *asd_ha)
116{
117 u8 i;
118 u8 phy_mask = asd_ha->hw_prof.enabled_phys;
119
120 for (i = 0; i < ASD_MAX_PHYS; i++) {
121 struct asd_phy *phy = &asd_ha->phys[i];
122
123 phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
124
125 phy->sas_phy.enabled = 0;
126 phy->sas_phy.id = i;
127 phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
128 phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
129 phy->sas_phy.ha = &asd_ha->sas_ha;
130 phy->sas_phy.lldd_phy = phy;
131 }
132
133 /* Now enable and initialize only the enabled phys. */
134 for_each_phy(phy_mask, phy_mask, i) {
135 int err = asd_init_phy(&asd_ha->phys[i]);
136 if (err)
137 return err;
138 }
139
140 return 0;
141}
142
143/* ---------- Sliding windows ---------- */
144
145static int asd_init_sw(struct asd_ha_struct *asd_ha)
146{
147 struct pci_dev *pcidev = asd_ha->pcidev;
148 int err;
149 u32 v;
150
151 /* Unlock MBARs */
152 err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
153 if (err) {
154 asd_printk("couldn't access conf. space of %s\n",
155 pci_name(pcidev));
156 goto Err;
157 }
158 if (v)
159 err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
160 if (err) {
161 asd_printk("couldn't write to MBAR_KEY of %s\n",
162 pci_name(pcidev));
163 goto Err;
164 }
165
166 /* Set sliding windows A, B and C to point to proper internal
167 * memory regions.
168 */
169 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
170 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
171 REG_BASE_ADDR_CSEQCIO);
172 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
173 asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
174 asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
175 asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
176 MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
177 if (!asd_ha->iospace) {
178 /* MBAR1 will point to OCM (On Chip Memory) */
179 pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
180 asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
181 }
182 spin_lock_init(&asd_ha->iolock);
183Err:
184 return err;
185}
186
187/* ---------- SCB initialization ---------- */
188
189/**
190 * asd_init_scbs - manually allocate the first SCB.
191 * @asd_ha: pointer to host adapter structure
192 *
193 * This allocates the very first SCB which would be sent to the
194 * sequencer for execution. Its bus address is written to
195 * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of
196 * the _next_ scb to be DMA-ed to the host adapter is read from the last
197 * SCB DMA-ed to the host adapter, we have to always stay one step
198 * ahead of the sequencer and keep one SCB already allocated.
199 */
200static int asd_init_scbs(struct asd_ha_struct *asd_ha)
201{
202 struct asd_seq_data *seq = &asd_ha->seq;
203 int bitmap_bytes;
204
205 /* allocate the index array and bitmap */
206 asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
207 asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
208 sizeof(void *), GFP_KERNEL);
209 if (!asd_ha->seq.tc_index_array)
210 return -ENOMEM;
211
212 bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
213 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
214 asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
215 if (!asd_ha->seq.tc_index_bitmap)
216 return -ENOMEM;
217
218 spin_lock_init(&seq->tc_index_lock);
219
220 seq->next_scb.size = sizeof(struct scb);
221 seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
222 &seq->next_scb.dma_handle);
223 if (!seq->next_scb.vaddr) {
224 kfree(asd_ha->seq.tc_index_bitmap);
225 kfree(asd_ha->seq.tc_index_array);
226 asd_ha->seq.tc_index_bitmap = NULL;
227 asd_ha->seq.tc_index_array = NULL;
228 return -ENOMEM;
229 }
230
231 seq->pending = 0;
232 spin_lock_init(&seq->pend_q_lock);
233 INIT_LIST_HEAD(&seq->pend_q);
234
235 return 0;
236}
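/*
 * Editorial sketch (not part of the driver): the two-step bitmap sizing
 * above first rounds the bit count up to whole bytes, then rounds the byte
 * count up to whole unsigned longs, since kernel bitops operate on
 * long-sized words.  E.g. 130 bits -> 17 bytes -> 24 bytes (3 longs) on a
 * 64-bit machine.
 */
static unsigned long bitmap_bytes_for(unsigned long bits)
{
	unsigned long bytes = (bits + 7) / 8;		/* round up to bytes */
	unsigned long bpl = 8 * sizeof(unsigned long);	/* bits per long */

	return ((bytes * 8 + bpl - 1) / bpl) * sizeof(unsigned long);
}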
237
238static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
239{
240 asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
241 asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
242 ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
243 asd_ha->hw_prof.max_scbs,
244 asd_ha->hw_prof.max_ddbs);
245}
246
247/* ---------- Done List initialization ---------- */
248
249static void asd_dl_tasklet_handler(unsigned long);
250
251static int asd_init_dl(struct asd_ha_struct *asd_ha)
252{
253 asd_ha->seq.actual_dl
254 = asd_alloc_coherent(asd_ha,
255 ASD_DL_SIZE * sizeof(struct done_list_struct),
256 GFP_KERNEL);
257 if (!asd_ha->seq.actual_dl)
258 return -ENOMEM;
259 asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
260 asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
261 asd_ha->seq.dl_next = 0;
262 tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
263 (unsigned long) asd_ha);
264
265 return 0;
266}
267
268/* ---------- EDB and ESCB init ---------- */
269
270static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags)
271{
272 struct asd_seq_data *seq = &asd_ha->seq;
273 int i;
274
275 seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
276 if (!seq->edb_arr)
277 return -ENOMEM;
278
279 for (i = 0; i < seq->num_edbs; i++) {
280 seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
281 gfp_flags);
282 if (!seq->edb_arr[i])
283 goto Err_unroll;
284 memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
285 }
286
287 ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);
288
289 return 0;
290
291Err_unroll:
292 for (i-- ; i >= 0; i--)
293 asd_free_coherent(asd_ha, seq->edb_arr[i]);
294 kfree(seq->edb_arr);
295 seq->edb_arr = NULL;
296
297 return -ENOMEM;
298}
299
300static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
301 gfp_t gfp_flags)
302{
303 struct asd_seq_data *seq = &asd_ha->seq;
304 struct asd_ascb *escb;
305 int i, escbs;
306
307 seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
308 gfp_flags);
309 if (!seq->escb_arr)
310 return -ENOMEM;
311
312 escbs = seq->num_escbs;
313 escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
314 if (!escb) {
315 asd_printk("couldn't allocate list of escbs\n");
316 goto Err;
317 }
318 seq->num_escbs -= escbs; /* subtract what was not allocated */
319 ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);
320
321 for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
322 struct asd_ascb,
323 list)) {
324 seq->escb_arr[i] = escb;
325 escb->scb->header.opcode = EMPTY_SCB;
326 }
327
328 return 0;
329Err:
330 kfree(seq->escb_arr);
331 seq->escb_arr = NULL;
332 return -ENOMEM;
333
334}
335
336static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
337{
338 struct asd_seq_data *seq = &asd_ha->seq;
339 int i, k, z = 0;
340
341 for (i = 0; i < seq->num_escbs; i++) {
342 struct asd_ascb *ascb = seq->escb_arr[i];
343 struct empty_scb *escb = &ascb->scb->escb;
344
345 ascb->edb_index = z;
346
347 escb->num_valid = ASD_EDBS_PER_SCB;
348
349 for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
350 struct sg_el *eb = &escb->eb[k];
351 struct asd_dma_tok *edb = seq->edb_arr[z++];
352
353 memset(eb, 0, sizeof(*eb));
354 eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
355 eb->size = cpu_to_le32(((u32) edb->size));
356 }
357 }
358}
359
360/**
361 * asd_init_escbs -- allocate and initialize empty scbs
362 * @asd_ha: pointer to host adapter structure
363 *
364 * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers.
365 * They transport sense data, etc.
366 */
367static int asd_init_escbs(struct asd_ha_struct *asd_ha)
368{
369 struct asd_seq_data *seq = &asd_ha->seq;
370 int err = 0;
371
372 /* Allocate two empty data buffers (edb) per sequencer. */
373 int edbs = 2*(1+asd_ha->hw_prof.num_phys);
374
375 seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
376 seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;
377
378 err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
379 if (err) {
380 asd_printk("couldn't allocate edbs\n");
381 return err;
382 }
383
384 err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
385 if (err) {
386 asd_printk("couldn't allocate escbs\n");
387 return err;
388 }
389
390 asd_assign_edbs2escbs(asd_ha);
391 /* In order to ensure that normal SCBs do not overfill sequencer
392 * memory and leave no space for escbs (halting condition),
393 * we increment pending here by the number of escbs. However,
394 * escbs are never pending.
395 */
396 seq->pending = seq->num_escbs;
397 seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;
398
399 return 0;
400}
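/*
 * Editorial sketch (not part of the driver): the ESCB count above is a
 * ceiling division -- enough empty SCBs to carry all EDBs at
 * ASD_EDBS_PER_SCB (7) buffers per SCB.  E.g. 18 EDBs need 3 ESCBs, which
 * then provide 21 EDB slots.
 */
static int escbs_needed(int edbs, int edbs_per_scb)
{
	return (edbs + edbs_per_scb - 1) / edbs_per_scb;
}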
401
402/* ---------- HW initialization ---------- */
403
404/**
405 * asd_chip_hardrst -- hard reset the chip
406 * @asd_ha: pointer to host adapter structure
407 *
408 * This takes 16 cycles and is synchronous to CFCLK, which runs
409 * at 200 MHz, so this should take at most 80 nanoseconds.
410 */
411int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
412{
413 int i;
414 int count = 100;
415 u32 reg;
416
417 for (i = 0 ; i < 4 ; i++) {
418 asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
419 }
420
421 do {
422 udelay(1);
423 reg = asd_read_reg_dword(asd_ha, CHIMINT);
424 if (reg & HARDRSTDET) {
425 asd_write_reg_dword(asd_ha, CHIMINT,
426 HARDRSTDET|PORRSTDET);
427 return 0;
428 }
429 } while (--count > 0);
430
431 return -ENODEV;
432}
433
434/**
435 * asd_init_chip -- initialize the chip
436 * @asd_ha: pointer to host adapter structure
437 *
438 * Hard resets the chip, disables HA interrupts, downloads the sequencer
439 * microcode and starts the sequencers. The caller has to explicitly
440 * enable HA interrupts with asd_enable_ints(asd_ha).
441 */
442static int asd_init_chip(struct asd_ha_struct *asd_ha)
443{
444 int err;
445
446 err = asd_chip_hardrst(asd_ha);
447 if (err) {
448 asd_printk("couldn't hard reset %s\n",
449 pci_name(asd_ha->pcidev));
450 goto out;
451 }
452
453 asd_disable_ints(asd_ha);
454
455 err = asd_init_seqs(asd_ha);
456 if (err) {
457 asd_printk("couldn't init seqs for %s\n",
458 pci_name(asd_ha->pcidev));
459 goto out;
460 }
461
462 err = asd_start_seqs(asd_ha);
463 if (err) {
464 asd_printk("couldn't start seqs for %s\n",
465 pci_name(asd_ha->pcidev));
466 goto out;
467 }
468out:
469 return err;
470}
471
472#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
473
474static int max_devs = 0;
475module_param_named(max_devs, max_devs, int, S_IRUGO);
476MODULE_PARM_DESC(max_devs, "\n"
477 "\tMaximum number of SAS devices to support (not LUs).\n"
478 "\tDefault: 2176, Maximum: 65663.\n");
479
480static int max_cmnds = 0;
481module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
482MODULE_PARM_DESC(max_cmnds, "\n"
483 "\tMaximum number of commands queuable.\n"
484 "\tDefault: 512, Maximum: 66047.\n");
485
486static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
487{
488 unsigned long dma_addr = OCM_BASE_ADDR;
489 u32 d;
490
491 dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
492 asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
493 d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
494 d |= 4;
495 asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
496 asd_ha->hw_prof.max_ddbs += MAX_DEVS;
497}
498
499static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
500{
501 dma_addr_t dma_handle;
502 unsigned long dma_addr;
503 u32 d;
504 int size;
505
506 asd_extend_devctx_ocm(asd_ha);
507
508 asd_ha->hw_prof.ddb_ext = NULL;
509 if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
510 max_devs = asd_ha->hw_prof.max_ddbs;
511 return 0;
512 }
513
514 size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
515
516 asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
517 if (!asd_ha->hw_prof.ddb_ext) {
518 asd_printk("couldn't allocate memory for %d devices\n",
519 max_devs);
520 max_devs = asd_ha->hw_prof.max_ddbs;
521 return -ENOMEM;
522 }
523 dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
524 dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
525 dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
526 dma_handle = (dma_addr_t) dma_addr;
527 asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
528 d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
529 d &= ~4;
530 asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
531
532 asd_ha->hw_prof.max_ddbs = max_devs;
533
534 return 0;
535}
536
537static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
538{
539 dma_addr_t dma_handle;
540 unsigned long dma_addr;
541 u32 d;
542 int size;
543
544 asd_ha->hw_prof.scb_ext = NULL;
545 if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
546 max_cmnds = asd_ha->hw_prof.max_scbs;
547 return 0;
548 }
549
550 size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
551
552 asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
553 if (!asd_ha->hw_prof.scb_ext) {
554 asd_printk("couldn't allocate memory for %d commands\n",
555 max_cmnds);
556 max_cmnds = asd_ha->hw_prof.max_scbs;
557 return -ENOMEM;
558 }
559 dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
560 dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
561 dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
562 dma_handle = (dma_addr_t) dma_addr;
563 asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
564 d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
565 d &= ~1;
566 asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
567
568 asd_ha->hw_prof.max_scbs = max_cmnds;
569
570 return 0;
571}
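/*
 * Editorial sketch (not part of the driver): both extension routines above
 * program the context base register with the host buffer's aligned address
 * biased downward by (current_max * entry_size), so that entry numbers from
 * current_max upward fall at the start of the new buffer.  Assumes
 * entry_size is a power of two, as ASD_DDB_SIZE and ASD_SCB_SIZE are.
 */
static unsigned long biased_ctx_base(unsigned long buf,
				     unsigned long entry_size,
				     unsigned long current_max)
{
	/* round the buffer up to an entry_size boundary, like ALIGN() */
	unsigned long aligned = (buf + entry_size - 1) & ~(entry_size - 1);

	return aligned - current_max * entry_size;
}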
572
573/**
574 * asd_init_ctxmem -- initialize context memory
575 * @asd_ha: pointer to host adapter structure
576 *
577 * This function sets the maximum number of SCBs and
578 * DDBs which can be used by the sequencer. This is normally
579 * 512 and 128 respectively. If support for more SCBs or more DDBs
580 * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
581 * initialized here to extend context memory to point to host memory,
582 * thus allowing unlimited support for SCBs and DDBs -- only limited
583 * by host memory.
584 */
585static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
586{
587 int bitmap_bytes;
588
589 asd_get_max_scb_ddb(asd_ha);
590 asd_extend_devctx(asd_ha);
591 asd_extend_cmdctx(asd_ha);
592
593 /* The kernel wants bitmaps to be unsigned long sized. */
594 bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
595 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
596 asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
597 if (!asd_ha->hw_prof.ddb_bitmap)
598 return -ENOMEM;
599 spin_lock_init(&asd_ha->hw_prof.ddb_lock);
600
601 return 0;
602}
603
604int asd_init_hw(struct asd_ha_struct *asd_ha)
605{
606 int err;
607 u32 v;
608
609 err = asd_init_sw(asd_ha);
610 if (err)
611 return err;
612
613 err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
614 if (err) {
615 asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
616 pci_name(asd_ha->pcidev));
617 return err;
618 }
619 err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
620 v | SC_TMR_DIS);
621 if (err) {
622 asd_printk("couldn't disable split completion timer of %s\n",
623 pci_name(asd_ha->pcidev));
624 return err;
625 }
626
627 err = asd_read_ocm(asd_ha);
628 if (err) {
629 asd_printk("couldn't read ocm(%d)\n", err);
630 /* While suspicious, it is not an error that we
631 * couldn't read the OCM. */
632 }
633
634 err = asd_read_flash(asd_ha);
635 if (err) {
636 asd_printk("couldn't read flash(%d)\n", err);
637 /* While suspicious, it is not an error that we
638 * couldn't read FLASH memory.
639 */
640 }
641
642 asd_init_ctxmem(asd_ha);
643
644 asd_get_user_sas_addr(asd_ha);
645 if (!asd_ha->hw_prof.sas_addr[0]) {
646 asd_printk("No SAS Address provided for %s\n",
647 pci_name(asd_ha->pcidev));
648 err = -ENODEV;
649 goto Out;
650 }
651
652 asd_propagate_sas_addr(asd_ha);
653
654 err = asd_init_phys(asd_ha);
655 if (err) {
656 asd_printk("couldn't initialize phys for %s\n",
657 pci_name(asd_ha->pcidev));
658 goto Out;
659 }
660
661 err = asd_init_scbs(asd_ha);
662 if (err) {
663 asd_printk("couldn't initialize scbs for %s\n",
664 pci_name(asd_ha->pcidev));
665 goto Out;
666 }
667
668 err = asd_init_dl(asd_ha);
669 if (err) {
670 asd_printk("couldn't initialize the done list:%d\n",
671 err);
672 goto Out;
673 }
674
675 err = asd_init_escbs(asd_ha);
676 if (err) {
677 asd_printk("couldn't initialize escbs\n");
678 goto Out;
679 }
680
681 err = asd_init_chip(asd_ha);
682 if (err) {
683 asd_printk("couldn't init the chip\n");
684 goto Out;
685 }
686Out:
687 return err;
688}
689
690/* ---------- Chip reset ---------- */
691
692/**
693 * asd_chip_reset -- reset the host adapter, etc
694 * @asd_ha: pointer to host adapter structure of interest
695 *
696 * Called from the ISR. Hard reset the chip. Let everything
697 * timeout. This should be no different than hot-unplugging the
698 * host adapter. Once everything times out we'll init the chip with
699 * a call to asd_init_chip() and enable interrupts with asd_enable_ints().
700 * XXX finish.
701 */
702static void asd_chip_reset(struct asd_ha_struct *asd_ha)
703{
704 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
705
706 ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
707 asd_chip_hardrst(asd_ha);
708 sas_ha->notify_ha_event(sas_ha, HAE_RESET);
709}
710
711/* ---------- Done List Routines ---------- */
712
713static void asd_dl_tasklet_handler(unsigned long data)
714{
715 struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
716 struct asd_seq_data *seq = &asd_ha->seq;
717 unsigned long flags;
718
719 while (1) {
720 struct done_list_struct *dl = &seq->dl[seq->dl_next];
721 struct asd_ascb *ascb;
722
723 if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
724 break;
725
726 /* find the aSCB */
727 spin_lock_irqsave(&seq->tc_index_lock, flags);
728 ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
729 spin_unlock_irqrestore(&seq->tc_index_lock, flags);
730 if (unlikely(!ascb)) {
731 ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
732 goto next_1;
733 } else if (ascb->scb->header.opcode == EMPTY_SCB) {
734 goto out;
735 } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
736 goto next_1;
737 }
738 spin_lock_irqsave(&seq->pend_q_lock, flags);
739 list_del_init(&ascb->list);
740 seq->pending--;
741 spin_unlock_irqrestore(&seq->pend_q_lock, flags);
742 out:
743 ascb->tasklet_complete(ascb, dl);
744
745 next_1:
746 seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
747 if (!seq->dl_next)
748 seq->dl_toggle ^= DL_TOGGLE_MASK;
749 }
750}
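/*
 * Editorial sketch (not part of the driver): the done-list consumer above is
 * a power-of-two ring whose "new entry" marker is a toggle bit instead of a
 * head/tail pair.  The consumer flips the toggle it expects each time the
 * index wraps, so entries left over from the previous lap never match.
 */
#define RING_SIZE 8			/* power of two, like ASD_DL_SIZE */

struct toggle_ring {
	unsigned char toggle[RING_SIZE];	/* written by the producer */
	int next;				/* consumer index */
	unsigned char expect;			/* toggle value for this lap */
};

static int ring_consume_one(struct toggle_ring *r)
{
	if (r->toggle[r->next] != r->expect)
		return 0;			/* no new entry yet */
	/* ... process entry r->next here ... */
	r->next = (r->next + 1) & (RING_SIZE - 1);
	if (!r->next)
		r->expect ^= 1;			/* wrapped: expect new toggle */
	return 1;
}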
751
752/* ---------- Interrupt Service Routines ---------- */
753
754/**
755 * asd_process_donelist_isr -- schedule processing of done list entries
756 * @asd_ha: pointer to host adapter structure
757 */
758static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
759{
760 tasklet_schedule(&asd_ha->seq.dl_tasklet);
761}
762
763/**
764 * asd_com_sas_isr -- process device communication interrupt (COMINT)
765 * @asd_ha: pointer to host adapter structure
766 */
767static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
768{
769 u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);
770
771 /* clear COMSTAT int */
772 asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);
773
774 if (comstat & CSBUFPERR) {
775 asd_printk("%s: command/status buffer dma parity error\n",
776 pci_name(asd_ha->pcidev));
777 } else if (comstat & CSERR) {
778 int i;
779 u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
780 dmaerr &= 0xFF;
781 asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
782 "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
783 pci_name(asd_ha->pcidev),
784 dmaerr,
785 asd_read_reg_dword(asd_ha, CSDMAADR),
786 asd_read_reg_dword(asd_ha, CSDMAADR+4));
787 asd_printk("CSBUFFER:\n");
788 for (i = 0; i < 8; i++) {
789 asd_printk("%08x %08x %08x %08x\n",
790 asd_read_reg_dword(asd_ha, CSBUFFER),
791 asd_read_reg_dword(asd_ha, CSBUFFER+4),
792 asd_read_reg_dword(asd_ha, CSBUFFER+8),
793 asd_read_reg_dword(asd_ha, CSBUFFER+12));
794 }
795 asd_dump_seq_state(asd_ha, 0);
796 } else if (comstat & OVLYERR) {
797 u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
798 dmaerr = (dmaerr >> 8) & 0xFF;
799 asd_printk("%s: overlay dma error:0x%x\n",
800 pci_name(asd_ha->pcidev),
801 dmaerr);
802 }
803 asd_chip_reset(asd_ha);
804}
805
806static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
807{
808 static const char *halt_code[256] = {
809 "UNEXPECTED_INTERRUPT0",
810 "UNEXPECTED_INTERRUPT1",
811 "UNEXPECTED_INTERRUPT2",
812 "UNEXPECTED_INTERRUPT3",
813 "UNEXPECTED_INTERRUPT4",
814 "UNEXPECTED_INTERRUPT5",
815 "UNEXPECTED_INTERRUPT6",
816 "UNEXPECTED_INTERRUPT7",
817 "UNEXPECTED_INTERRUPT8",
818 "UNEXPECTED_INTERRUPT9",
819 "UNEXPECTED_INTERRUPT10",
820 [11 ... 19] = "unknown[11,19]",
821 "NO_FREE_SCB_AVAILABLE",
822 "INVALID_SCB_OPCODE",
823 "INVALID_MBX_OPCODE",
824 "INVALID_ATA_STATE",
825 "ATA_QUEUE_FULL",
826 "ATA_TAG_TABLE_FAULT",
827 "ATA_TAG_MASK_FAULT",
828 "BAD_LINK_QUEUE_STATE",
829 "DMA2CHIM_QUEUE_ERROR",
830 "EMPTY_SCB_LIST_FULL",
831 "unknown[30]",
832 "IN_USE_SCB_ON_FREE_LIST",
833 "BAD_OPEN_WAIT_STATE",
834 "INVALID_STP_AFFILIATION",
835 "unknown[34]",
836 "EXEC_QUEUE_ERROR",
837 "TOO_MANY_EMPTIES_NEEDED",
838 "EMPTY_REQ_QUEUE_ERROR",
839 "Q_MONIRTT_MGMT_ERROR",
840 "TARGET_MODE_FLOW_ERROR",
841 "DEVICE_QUEUE_NOT_FOUND",
842 "START_IRTT_TIMER_ERROR",
843 "ABORT_TASK_ILLEGAL_REQ",
844 [43 ... 255] = "unknown[43,255]"
845 };
846
847 if (dchstatus & CSEQINT) {
848 u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);
849
850 if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
851 asd_printk("%s: CSEQ arp2int:0x%x\n",
852 pci_name(asd_ha->pcidev),
853 arp2int);
854 } else if (arp2int & ARP2HALTC)
855 asd_printk("%s: CSEQ halted: %s\n",
856 pci_name(asd_ha->pcidev),
857 halt_code[(arp2int>>16)&0xFF]);
858 else
859 asd_printk("%s: CARP2INT:0x%x\n",
860 pci_name(asd_ha->pcidev),
861 arp2int);
862 }
863 if (dchstatus & LSEQINT_MASK) {
864 int lseq;
865 u8 lseq_mask = dchstatus & LSEQINT_MASK;
866
867 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
868 u32 arp2int = asd_read_reg_dword(asd_ha,
869 LmARP2INT(lseq));
870 if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
871 | ARP2CIOPERR)) {
872 asd_printk("%s: LSEQ%d arp2int:0x%x\n",
873 pci_name(asd_ha->pcidev),
874 lseq, arp2int);
875 /* XXX we should only do lseq reset */
876 } else if (arp2int & ARP2HALTC)
877 asd_printk("%s: LSEQ%d halted: %s\n",
878 pci_name(asd_ha->pcidev),
879 lseq, halt_code[(arp2int>>16)&0xFF]);
880 else
881 asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
882 pci_name(asd_ha->pcidev), lseq,
883 arp2int);
884 }
885 }
886 asd_chip_reset(asd_ha);
887}
888
889/**
890 * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
891 * @asd_ha: pointer to host adapter structure
892 */
893static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
894{
895 u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
896
897 if (dchstatus & CFIFTOERR) {
898 asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
899 asd_chip_reset(asd_ha);
900 } else
901 asd_arp2_err(asd_ha, dchstatus);
902}
903
904/**
905 * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
906 * @asd_ha: pointer to host adapter structure
907 */
908static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
909{
910 u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
911
912 if (!(stat0r & ASIERR)) {
913 asd_printk("hmm, EXSI interrupted but no error?\n");
914 return;
915 }
916
917 if (stat0r & ASIFMTERR) {
918 asd_printk("ASI SEEPROM format error for %s\n",
919 pci_name(asd_ha->pcidev));
920 } else if (stat0r & ASISEECHKERR) {
921 u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
922 asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
923 stat1r & CHECKSUM_MASK,
924 pci_name(asd_ha->pcidev));
925 } else {
926 u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);
927
928 if (!(statr & CPI2ASIMSTERR_MASK)) {
929 ASD_DPRINTK("hmm, ASIERR?\n");
930 return;
931 } else {
932 u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
933 u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);
934
935 asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
936 "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
937 "master id: 0x%x, master err: 0x%x\n",
938 pci_name(asd_ha->pcidev),
939 addr, data,
940 (statr & CPI2ASIBYTECNT_MASK) >> 16,
941 (statr & CPI2ASIBYTEEN_MASK) >> 12,
942 (statr & CPI2ASITARGERR_MASK) >> 8,
943 (statr & CPI2ASITARGMID_MASK) >> 4,
944 (statr & CPI2ASIMSTERR_MASK));
945 }
946 }
947 asd_chip_reset(asd_ha);
948}
949
950/**
951 * asd_hst_pcix_isr -- process host interface interrupts
952 * @asd_ha: pointer to host adapter structure
953 *
954 * Asserted on PCIX errors: target abort, etc.
955 */
956static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
957{
958 u16 status;
959 u32 pcix_status;
960 u32 ecc_status;
961
962 pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
963 pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
964 pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);
965
966 if (status & PCI_STATUS_DETECTED_PARITY)
967 asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
968 else if (status & PCI_STATUS_REC_MASTER_ABORT)
969 asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
970 else if (status & PCI_STATUS_REC_TARGET_ABORT)
971 asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
972 else if (status & PCI_STATUS_PARITY)
973 asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
974 else if (pcix_status & RCV_SCE) {
975 asd_printk("received split completion error for %s\n",
976 pci_name(asd_ha->pcidev));
977 pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status);
978 /* XXX: Abort task? */
979 return;
980 } else if (pcix_status & UNEXP_SC) {
981 asd_printk("unexpected split completion for %s\n",
982 pci_name(asd_ha->pcidev));
983 pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status);
984 /* ignore */
985 return;
986 } else if (pcix_status & SC_DISCARD)
987 asd_printk("split completion discarded for %s\n",
988 pci_name(asd_ha->pcidev));
989 else if (ecc_status & UNCOR_ECCERR)
990 asd_printk("uncorrectable ECC error for %s\n",
991 pci_name(asd_ha->pcidev));
992 asd_chip_reset(asd_ha);
993}
994
995/**
996 * asd_hw_isr -- host adapter interrupt service routine
997 * @irq: ignored
998 * @dev_id: pointer to host adapter structure
999 * @regs: ignored
1000 *
1001 * The ISR processes done list entries and level 3 error handling.
1002 */
1003irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs)
1004{
1005 struct asd_ha_struct *asd_ha = dev_id;
1006 u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);
1007
1008 if (!chimint)
1009 return IRQ_NONE;
1010
1011 asd_write_reg_dword(asd_ha, CHIMINT, chimint);
1012 (void) asd_read_reg_dword(asd_ha, CHIMINT);
1013
1014 if (chimint & DLAVAIL)
1015 asd_process_donelist_isr(asd_ha);
1016 if (chimint & COMINT)
1017 asd_com_sas_isr(asd_ha);
1018 if (chimint & DEVINT)
1019 asd_dch_sas_isr(asd_ha);
1020 if (chimint & INITERR)
1021 asd_rbi_exsi_isr(asd_ha);
1022 if (chimint & HOSTERR)
1023 asd_hst_pcix_isr(asd_ha);
1024
1025 return IRQ_HANDLED;
1026}
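/*
 * Editorial sketch (not part of the driver): the ISR above services a
 * write-1-to-clear status register -- it writes back exactly the bits it
 * read, then does a dummy read so the posted MMIO write is flushed to the
 * device before the handlers run.  rd()/wr() are hypothetical MMIO
 * accessors.
 */
static unsigned int ack_w1c_status(unsigned int (*rd)(unsigned long),
				   void (*wr)(unsigned long, unsigned int),
				   unsigned long reg)
{
	unsigned int status = rd(reg);

	if (status) {
		wr(reg, status);	/* clear only the bits we observed */
		(void)rd(reg);		/* flush the posted write */
	}
	return status;
}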
1027
1028/* ---------- SCB handling ---------- */
1029
1030static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
1031 gfp_t gfp_flags)
1032{
1033 extern kmem_cache_t *asd_ascb_cache;
1034 struct asd_seq_data *seq = &asd_ha->seq;
1035 struct asd_ascb *ascb;
1036 unsigned long flags;
1037
1038 ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags);
1039
1040 if (ascb) {
1041 memset(ascb, 0, sizeof(*ascb));
1042 ascb->dma_scb.size = sizeof(struct scb);
1043 ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
1044 gfp_flags,
1045 &ascb->dma_scb.dma_handle);
1046 if (!ascb->dma_scb.vaddr) {
1047 kmem_cache_free(asd_ascb_cache, ascb);
1048 return NULL;
1049 }
1050 memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
1051 asd_init_ascb(asd_ha, ascb);
1052
1053 spin_lock_irqsave(&seq->tc_index_lock, flags);
1054 ascb->tc_index = asd_tc_index_get(seq, ascb);
1055 spin_unlock_irqrestore(&seq->tc_index_lock, flags);
1056 if (ascb->tc_index == -1)
1057 goto undo;
1058
1059 ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);
1060 }
1061
1062 return ascb;
1063undo:
1064 dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
1065 ascb->dma_scb.dma_handle);
1066 kmem_cache_free(asd_ascb_cache, ascb);
1067 ASD_DPRINTK("no index for ascb\n");
1068 return NULL;
1069}
1070
1071/**
1072 * asd_ascb_alloc_list -- allocate a list of aSCBs
1073 * @asd_ha: pointer to host adapter structure
1074 * @num: pointer to integer number of aSCBs
1075 * @gfp_flags: GFP_ flags.
1076 *
1077 * This is the only function which is used to allocate aSCBs.
1078 * It can allocate one or many. If more than one, then they form
1079 * a linked list in two ways: by their list field of the ascb struct
1080 * and by the next_scb field of the scb_header.
1081 *
1082 * Returns NULL if no memory was available, else pointer to a list
1083 * of ascbs. When this function returns, @num will be the number
1084 * of SCBs which could not be allocated, or 0 if all requested
1085 * SCBs were allocated.
1086 */
1087struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
1088 *asd_ha, int *num,
1089 gfp_t gfp_flags)
1090{
1091 struct asd_ascb *first = NULL;
1092
1093 for ( ; *num > 0; --*num) {
1094 struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);
1095
1096 if (!ascb)
1097 break;
1098 else if (!first)
1099 first = ascb;
1100 else {
1101 struct asd_ascb *last = list_entry(first->list.prev,
1102 struct asd_ascb,
1103 list);
1104 list_add_tail(&ascb->list, &first->list);
1105 last->scb->header.next_scb =
1106 cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
1107 }
1108 }
1109
1110 return first;
1111}
1112
1113/**
1114 * asd_swap_head_scb -- swap the head scb
1115 * @asd_ha: pointer to host adapter structure
1116 * @ascb: pointer to the head of an ascb list
1117 *
1118 * The sequencer knows the DMA address of the next SCB to be DMAed to
1119 * the host adapter, from initialization or from the last list DMAed.
1120 * seq->next_scb keeps the address of this SCB. The sequencer will
1121 * DMA to the host adapter this list of SCBs. But the head (first
1122 * element) of this list is not known to the sequencer. Here we swap
1123 * the head of the list with the known SCB (memcpy()).
1124 * Only one memcpy() is required per list so it is in our interest
1125 * to keep the list of SCB as long as possible so that the ratio
1126 * of number of memcpy calls to the number of SCB DMA-ed is as small
1127 * as possible.
1128 *
1129 * LOCKING: called with the pending list lock held.
1130 */
1131static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
1132 struct asd_ascb *ascb)
1133{
1134 struct asd_seq_data *seq = &asd_ha->seq;
1135 struct asd_ascb *last = list_entry(ascb->list.prev,
1136 struct asd_ascb,
1137 list);
1138 struct asd_dma_tok t = ascb->dma_scb;
1139
1140 memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
1141 ascb->dma_scb = seq->next_scb;
1142 ascb->scb = ascb->dma_scb.vaddr;
1143 seq->next_scb = t;
1144 last->scb->header.next_scb =
1145 cpu_to_le64(((u64)seq->next_scb.dma_handle));
1146}
1147
1148/**
1149 * asd_start_timers -- (add and) start timers of SCBs
1150 * @list: pointer to struct list_head of the scbs
1151 * @to: timeout in jiffies
1152 *
1153 * If an SCB in the @list has no timer function, assign the default
1154 * one, then start the timer of the SCB. This function is
1155 * intended to be called from asd_post_ascb_list(), just prior to
1156 * posting the SCBs to the sequencer.
1157 */
1158static inline void asd_start_scb_timers(struct list_head *list)
1159{
1160 struct asd_ascb *ascb;
1161 list_for_each_entry(ascb, list, list) {
1162 if (!ascb->uldd_timer) {
1163 ascb->timer.data = (unsigned long) ascb;
1164 ascb->timer.function = asd_ascb_timedout;
1165 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
1166 add_timer(&ascb->timer);
1167 }
1168 }
1169}
1170
1171/**
1172 * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
1173 * @asd_ha: pointer to a host adapter structure
1174 * @ascb: pointer to the first aSCB in the list
1175 * @num: number of aSCBs in the list (to be posted)
1176 *
1177 * See queueing comment in asd_post_escb_list().
1178 *
1179 * Additional note on queuing: In order to minimize the ratio of memcpy()
1180 * to the number of ascbs sent, we try to batch-send as many ascbs as possible
1181 * in one go.
1182 * Two cases are possible:
1183 * A) can_queue >= num,
1184 * B) can_queue < num.
1185 * Case A: we can send the whole batch at once. "pending" is
1186 * incremented under the lock where it is checked, in order to
1187 * eliminate races when this function is called by multiple processes.
1188 * Case B: should never happen if the managing layer considers
1189 * lldd_queue_size.
1190 */
1191int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1192 int num)
1193{
1194 unsigned long flags;
1195 LIST_HEAD(list);
1196 int can_queue;
1197
1198 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1199 can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
1200 if (can_queue >= num)
1201 asd_ha->seq.pending += num;
1202 else
1203 can_queue = 0;
1204
1205 if (!can_queue) {
1206 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1207 asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
1208 return -SAS_QUEUE_FULL;
1209 }
1210
1211 asd_swap_head_scb(asd_ha, ascb);
1212
1213 __list_add(&list, ascb->list.prev, &ascb->list);
1214
1215 asd_start_scb_timers(&list);
1216
1217 asd_ha->seq.scbpro += num;
1218 list_splice_init(&list, asd_ha->seq.pend_q.prev);
1219 asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1220 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1221
1222 return 0;
1223}
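/*
 * Editorial sketch (not part of the driver): the posting path above reserves
 * its slots while holding the pend_q lock, so two submitters racing for the
 * last free slots cannot both succeed; "pending" only grows once the whole
 * batch is known to fit.  A kernel-context sketch of the pattern:
 */
static int reserve_slots(spinlock_t *lock, int *pending, int max, int num)
{
	unsigned long flags;
	int fits;

	spin_lock_irqsave(lock, flags);
	fits = (max - *pending) >= num;
	if (fits)
		*pending += num;	/* reserve the whole batch atomically */
	spin_unlock_irqrestore(lock, flags);

	return fits;
}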
1224
1225/**
1226 * asd_post_escb_list -- post a list of 1 or more empty scb
1227 * @asd_ha: pointer to a host adapter structure
1228 * @ascb: pointer to the first empty SCB in the list
1229 * @num: number of aSCBs in the list (to be posted)
1230 *
1231 * This is essentially the same as asd_post_ascb_list, but we do not
1232 * increment pending, add those to the pending list or get indexes.
1233 * See asd_init_escbs() and asd_init_post_escbs().
1234 *
1235 * Since sending a list of ascbs is a superset of sending a single
1236 * ascb, this function generalizes the operation. More specifically,
1237 * when sending a list of those, we want to do only a _single_
1238 * memcpy() at swap head, as opposed to for each ascb sent (in the
1239 * case of sending them one by one). That is, we want to minimize the
1240 * ratio of memcpy() operations to the number of ascbs sent. The same
1241 * logic applies to asd_post_ascb_list().
1242 */
1243int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1244 int num)
1245{
1246 unsigned long flags;
1247
1248 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1249 asd_swap_head_scb(asd_ha, ascb);
1250 asd_ha->seq.scbpro += num;
1251 asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1252 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1253
1254 return 0;
1255}
1256
1257/* ---------- LED ---------- */
1258
1259/**
1260 * asd_turn_led -- turn on/off an LED
1261 * @asd_ha: pointer to host adapter structure
1262 * @phy_id: the PHY id whose LED we want to manipulate
1263 * @op: 1 to turn on, 0 to turn off
1264 */
1265void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1266{
1267 if (phy_id < ASD_MAX_PHYS) {
1268 u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
1269 if (op)
1270 v |= LEDPOL;
1271 else
1272 v &= ~LEDPOL;
1273 asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
1274 }
1275}
1276
1277/**
1278 * asd_control_led -- enable/disable an LED on the board
1279 * @asd_ha: pointer to host adapter structure
1280 * @phy_id: integer, the phy id
1281 * @op: integer, 1 to enable, 0 to disable the LED
1282 *
1283 * First we output enable the LED, then we set the source
1284 * to be an external module.
1285 */
1286void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1287{
1288 if (phy_id < ASD_MAX_PHYS) {
1289 u32 v;
1290
1291 v = asd_read_reg_dword(asd_ha, GPIOOER);
1292 if (op)
1293 v |= (1 << phy_id);
1294 else
1295 v &= ~(1 << phy_id);
1296 asd_write_reg_dword(asd_ha, GPIOOER, v);
1297
1298 v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
1299 if (op)
1300 v |= (1 << phy_id);
1301 else
1302 v &= ~(1 << phy_id);
1303 asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
1304 }
1305}
1306
1307/* ---------- PHY enable ---------- */
1308
1309static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
1310{
1311 struct asd_phy *phy = &asd_ha->phys[phy_id];
1312
1313 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
1314 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
1315 HOTPLUG_DELAY_TIMEOUT);
1316
1317 /* Get defaults from manuf. sector */
1318 /* XXX we need defaults for those in case MS is broken. */
1319 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
1320 phy->phy_desc->phy_control_0);
1321 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
1322 phy->phy_desc->phy_control_1);
1323 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
1324 phy->phy_desc->phy_control_2);
1325 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
1326 phy->phy_desc->phy_control_3);
1327
1328 asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
1329 ASD_COMINIT_TIMEOUT);
1330
1331 asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
1332 phy->id_frm_tok->dma_handle);
1333
1334 asd_control_led(asd_ha, phy_id, 1);
1335
1336 return 0;
1337}
1338
1339int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1340{
1341 u8 phy_m;
1342 u8 i;
1343 int num = 0, k;
1344 struct asd_ascb *ascb;
1345 struct asd_ascb *ascb_list;
1346
1347 if (!phy_mask) {
1348 asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
1349 return 0;
1350 }
1351
1352 for_each_phy(phy_mask, phy_m, i) {
1353 num++;
1354 asd_enable_phy(asd_ha, i);
1355 }
1356
1357 k = num;
1358 ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
1359 if (!ascb_list) {
1360 asd_printk("no memory for control phy ascb list\n");
1361 return -ENOMEM;
1362 }
1363 num -= k;
1364
1365 ascb = ascb_list;
1366 for_each_phy(phy_mask, phy_m, i) {
1367 asd_build_control_phy(ascb, i, ENABLE_PHY);
1368 ascb = list_entry(ascb->list.next, struct asd_ascb, list);
1369 }
1370 ASD_DPRINTK("posting %d control phy scbs\n", num);
1371 k = asd_post_ascb_list(asd_ha, ascb_list, num);
1372 if (k)
1373 asd_ascb_free_list(ascb_list);
1374
1375 return k;
1376}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
new file mode 100644
index 000000000000..8498144aa5e1
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -0,0 +1,397 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_HWI_H_
28#define _AIC94XX_HWI_H_
29
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33
34#include <scsi/libsas.h>
35
36#include "aic94xx.h"
37#include "aic94xx_sas.h"
38
39/* Define ASD_MAX_PHYS to the maximum phys ever. Currently 8. */
40#define ASD_MAX_PHYS 8
41#define ASD_PCBA_SN_SIZE 12
42
43/* These are to be named properly (the "RAZORx" part) and
44 * subsequently included in include/linux/pci_ids.h.
45 */
46#define PCI_DEVICE_ID_ADAPTEC2_RAZOR10 0x410
47#define PCI_DEVICE_ID_ADAPTEC2_RAZOR12 0x412
48#define PCI_DEVICE_ID_ADAPTEC2_RAZOR1E 0x41E
49#define PCI_DEVICE_ID_ADAPTEC2_RAZOR30 0x430
50#define PCI_DEVICE_ID_ADAPTEC2_RAZOR32 0x432
51#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3E 0x43E
52#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3F 0x43F
53
54struct asd_ha_addrspace {
55 void __iomem *addr;
56 unsigned long start; /* pci resource start */
57 unsigned long len; /* pci resource len */
58 unsigned long flags; /* pci resource flags */
59
60 /* addresses internal to the host adapter */
61 u32 swa_base; /* mmspace 1 (MBAR1) uses this only */
62 u32 swb_base;
63 u32 swc_base;
64};
65
66struct bios_struct {
67 int present;
68 u8 maj;
69 u8 min;
70 u32 bld;
71};
72
73struct unit_element_struct {
74 u16 num;
75 u16 size;
76 void *area;
77};
78
79struct flash_struct {
80 u32 bar;
81 int present;
82 int wide;
83 u8 manuf;
84 u8 dev_id;
85 u8 sec_prot;
86
87 u32 dir_offs;
88};
89
90struct asd_phy_desc {
91 /* From CTRL-A settings, then set to what is appropriate */
92 u8 sas_addr[SAS_ADDR_SIZE];
93 u8 max_sas_lrate;
94 u8 min_sas_lrate;
95 u8 max_sata_lrate;
96 u8 min_sata_lrate;
97 u8 flags;
98#define ASD_CRC_DIS 1
99#define ASD_SATA_SPINUP_HOLD 2
100
101 u8 phy_control_0; /* mode 5 reg 0x160 */
102 u8 phy_control_1; /* mode 5 reg 0x161 */
103 u8 phy_control_2; /* mode 5 reg 0x162 */
104 u8 phy_control_3; /* mode 5 reg 0x163 */
105};
106
107struct asd_dma_tok {
108 void *vaddr;
109 dma_addr_t dma_handle;
110 size_t size;
111};
112
113struct hw_profile {
114 struct bios_struct bios;
115 struct unit_element_struct ue;
116 struct flash_struct flash;
117
118 u8 sas_addr[SAS_ADDR_SIZE];
119 char pcba_sn[ASD_PCBA_SN_SIZE+1];
120
121 u8 enabled_phys; /* mask of enabled phys */
122 struct asd_phy_desc phy_desc[ASD_MAX_PHYS];
123 u32 max_scbs; /* absolute sequencer scb queue size */
124 struct asd_dma_tok *scb_ext;
125 u32 max_ddbs;
126 struct asd_dma_tok *ddb_ext;
127
128 spinlock_t ddb_lock;
129 void *ddb_bitmap;
130
131 int num_phys; /* ENABLEABLE */
132 int max_phys; /* REPORTED + ENABLEABLE */
133
134 unsigned addr_range; /* max # of addrs; max # of possible ports */
135 unsigned port_name_base;
136 unsigned dev_name_base;
137 unsigned sata_name_base;
138};
139
140struct asd_ascb {
141 struct list_head list;
142 struct asd_ha_struct *ha;
143
144 struct scb *scb; /* equals dma_scb->vaddr */
145 struct asd_dma_tok dma_scb;
146 struct asd_dma_tok *sg_arr;
147
148 void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *);
149 u8 uldd_timer:1;
150
151 /* internally generated command */
152 struct timer_list timer;
153 struct completion completion;
154 u8 tag_valid:1;
155 __be16 tag; /* error recovery only */
156
157 /* If this is an Empty SCB, index of first edb in seq->edb_arr. */
158 int edb_index;
159
160 /* Used by the timer timeout function. */
161 int tc_index;
162
163 void *uldd_task;
164};
165
166#define ASD_DL_SIZE_BITS 0x8
167#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS))
168#define ASD_DEF_DL_TOGGLE 0x01
169
170struct asd_seq_data {
171 spinlock_t pend_q_lock;
172 u16 scbpro;
173 int pending;
174 struct list_head pend_q;
175 int can_queue; /* per adapter */
176 struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */
177
178 spinlock_t tc_index_lock;
179 void **tc_index_array;
180 void *tc_index_bitmap;
181 int tc_index_bitmap_bits;
182
183 struct tasklet_struct dl_tasklet;
184 struct done_list_struct *dl; /* array of done list entries, equals */
185 struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */
186 int dl_toggle;
187 int dl_next;
188
189 int num_edbs;
190 struct asd_dma_tok **edb_arr;
191 int num_escbs;
192 struct asd_ascb **escb_arr; /* array of pointers to escbs */
193};
194
195/* This is the Host Adapter structure. It describes the hardware
196 * SAS adapter.
197 */
198struct asd_ha_struct {
199 struct pci_dev *pcidev;
200 const char *name;
201
202 struct sas_ha_struct sas_ha;
203
204 u8 revision_id;
205
206 int iospace;
207 spinlock_t iolock;
208 struct asd_ha_addrspace io_handle[2];
209
210 struct hw_profile hw_prof;
211
212 struct asd_phy phys[ASD_MAX_PHYS];
213 struct asd_sas_port ports[ASD_MAX_PHYS];
214
215 struct dma_pool *scb_pool;
216
217 struct asd_seq_data seq; /* sequencer related */
218};
219
220/* ---------- Common macros ---------- */
221
222#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle))
223#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8) \
224 ? ((u32)((__dma_handle) >> 32)) \
225 : ((u32)0))
226
227#define dev_to_asd_ha(__dev) pci_get_drvdata(to_pci_dev(__dev))
228#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \
229 && ((__site_no) & 0xF0FF) > 0x001F)
230/* For each bit set in __lseq_mask, set __lseq to equal the bit
231 * position of the set bit and execute the statement following.
232 * __mc is the temporary mask, used as a mask "counter".
233 */
234#define for_each_sequencer(__lseq_mask, __mc, __lseq) \
235 for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
236 if (((__mc) & 1))
237#define for_each_phy(__lseq_mask, __mc, __lseq) \
238 for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
239 if (((__mc) & 1))
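A quick illustration of the iteration macros (the mask value is arbitrary): for_each_phy() runs its body once per set bit, with the index variable holding the bit position.

/* Sketch: with a mask of 0x05, the body runs for i == 0 and i == 2. */
static void example_iterate_mask(void)
{
	u8 mask = 0x05, m;
	u8 i;

	for_each_phy(mask, m, i)
		printk("phy %d is in the mask\n", i);
}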
240
241#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I)))
242
243/* ---------- DMA allocs ---------- */
244
245static inline struct asd_dma_tok *asd_dmatok_alloc(gfp_t flags)
246{
247 return kmem_cache_alloc(asd_dma_token_cache, flags);
248}
249
250static inline void asd_dmatok_free(struct asd_dma_tok *token)
251{
252 kmem_cache_free(asd_dma_token_cache, token);
253}
254
255static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct *
256 asd_ha, size_t size,
257 gfp_t flags)
258{
259 struct asd_dma_tok *token = asd_dmatok_alloc(flags);
260 if (token) {
261 token->size = size;
262 token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev,
263 token->size,
264 &token->dma_handle,
265 flags);
266 if (!token->vaddr) {
267 asd_dmatok_free(token);
268 token = NULL;
269 }
270 }
271 return token;
272}
273
274static inline void asd_free_coherent(struct asd_ha_struct *asd_ha,
275 struct asd_dma_tok *token)
276{
277 if (token) {
278 dma_free_coherent(&asd_ha->pcidev->dev, token->size,
279 token->vaddr, token->dma_handle);
280 asd_dmatok_free(token);
281 }
282}
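A minimal sketch of a DMA token's lifetime, assuming the surrounding driver context (the 512-byte size is arbitrary):

/* Sketch: allocate a coherent buffer wrapped in a token, then free it. */
static int example_dma_token(struct asd_ha_struct *asd_ha)
{
	struct asd_dma_tok *tok = asd_alloc_coherent(asd_ha, 512, GFP_KERNEL);

	if (!tok)
		return -ENOMEM;
	/* tok->vaddr is the CPU-visible buffer, tok->dma_handle the
	 * bus address to hand to the hardware */
	asd_free_coherent(asd_ha, tok);
	return 0;
}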
283
284static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
285 struct asd_ascb *ascb)
286{
287 INIT_LIST_HEAD(&ascb->list);
288 ascb->scb = ascb->dma_scb.vaddr;
289 ascb->ha = asd_ha;
290 ascb->timer.function = NULL;
291 init_timer(&ascb->timer);
292 ascb->tc_index = -1;
293 init_completion(&ascb->completion);
294}
295
296/* Must be called with the tc_index_lock held!
297 */
298static inline void asd_tc_index_release(struct asd_seq_data *seq, int index)
299{
300 seq->tc_index_array[index] = NULL;
301 clear_bit(index, seq->tc_index_bitmap);
302}
303
304/* Must be called with the tc_index_lock held!
305 */
306static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr)
307{
308 int index;
309
310 index = find_first_zero_bit(seq->tc_index_bitmap,
311 seq->tc_index_bitmap_bits);
312 if (index == seq->tc_index_bitmap_bits)
313 return -1;
314
315 seq->tc_index_array[index] = ptr;
316 set_bit(index, seq->tc_index_bitmap);
317
318 return index;
319}
320
321/* Must be called with the tc_index_lock held!
322 */
323static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index)
324{
325 return seq->tc_index_array[index];
326}
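Taken together, the three tc_index helpers implement a small pointer registry keyed by a bitmap-allocated index. A hedged sketch of the intended usage, with the required lock held around each call:

/* Sketch: register an ascb under tc_index_lock and find it again later. */
static int example_tc_index(struct asd_ha_struct *asd_ha,
			    struct asd_ascb *ascb)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	unsigned long flags;
	int index;

	spin_lock_irqsave(&seq->tc_index_lock, flags);
	index = asd_tc_index_get(seq, ascb);	/* -1 when the bitmap is full */
	spin_unlock_irqrestore(&seq->tc_index_lock, flags);
	if (index == -1)
		return -ENOMEM;

	/* later, e.g. when the completion comes in */
	spin_lock_irqsave(&seq->tc_index_lock, flags);
	ascb = asd_tc_index_find(seq, index);
	asd_tc_index_release(seq, index);
	spin_unlock_irqrestore(&seq->tc_index_lock, flags);
	return 0;
}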
327
328/**
329 * asd_ascb_free -- free a single aSCB after it has completed
330 * @ascb: pointer to the aSCB of interest
331 *
332 * This frees an aSCB after it has been executed/completed by
333 * the sequencer.
334 */
335static inline void asd_ascb_free(struct asd_ascb *ascb)
336{
337 if (ascb) {
338 struct asd_ha_struct *asd_ha = ascb->ha;
339 unsigned long flags;
340
341 BUG_ON(!list_empty(&ascb->list));
342 spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags);
343 asd_tc_index_release(&ascb->ha->seq, ascb->tc_index);
344 spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags);
345 dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
346 ascb->dma_scb.dma_handle);
347 kmem_cache_free(asd_ascb_cache, ascb);
348 }
349}
350
351/**
352 * asd_ascb_list_free -- free a list of ascbs
353 * @ascb_list: a list of ascbs
354 *
355 * This function will free a list of ascbs allocated by asd_ascb_alloc_list.
356 * It is used when say the scb queueing function returned QUEUE_FULL,
357 * and we do not need the ascbs any more.
358 */
359static inline void asd_ascb_free_list(struct asd_ascb *ascb_list)
360{
361 LIST_HEAD(list);
362 struct list_head *n, *pos;
363
364 __list_add(&list, ascb_list->list.prev, &ascb_list->list);
365 list_for_each_safe(pos, n, &list) {
366 list_del_init(pos);
367 asd_ascb_free(list_entry(pos, struct asd_ascb, list));
368 }
369}
370
371/* ---------- Function declarations ---------- */
372
373int asd_init_hw(struct asd_ha_struct *asd_ha);
374irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs);
375
376
377struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
378 *asd_ha, int *num,
379 gfp_t gfp_mask);
380
381int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
382 int num);
383int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
384 int num);
385
386int asd_init_post_escbs(struct asd_ha_struct *asd_ha);
387void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc);
388void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
389void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
390int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask);
391void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
392 u8 subfunc);
393
394void asd_ascb_timedout(unsigned long data);
395int asd_chip_hardrst(struct asd_ha_struct *asd_ha);
396
397#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
new file mode 100644
index 000000000000..ee2ccad70487
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -0,0 +1,866 @@
1/*
2 * Aic94xx SAS/SATA driver initialization.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/pci.h>
32#include <linux/delay.h>
33
34#include <scsi/scsi_host.h>
35
36#include "aic94xx.h"
37#include "aic94xx_reg.h"
38#include "aic94xx_hwi.h"
39#include "aic94xx_seq.h"
40
41/* The format is "version.release.patchlevel" */
42#define ASD_DRIVER_VERSION "1.0.2"
43
44static int use_msi = 0;
45module_param_named(use_msi, use_msi, int, S_IRUGO);
46MODULE_PARM_DESC(use_msi, "\n"
47 "\tEnable(1) or disable(0) using PCI MSI.\n"
48 "\tDefault: 0");
49
50static int lldd_max_execute_num = 0;
51module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
52MODULE_PARM_DESC(collector, "\n"
53 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
54 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
55 "\tThe aic94xx SAS LLDD supports both modes.\n"
56 "\tDefault: 0 (Direct Mode).\n");
57
58char sas_addr_str[2*SAS_ADDR_SIZE + 1] = "";
59
60static struct scsi_transport_template *aic94xx_transport_template;
61
62static struct scsi_host_template aic94xx_sht = {
63 .module = THIS_MODULE,
64 /* .name is initialized */
65 .name = "aic94xx",
66 .queuecommand = sas_queuecommand,
67 .target_alloc = sas_target_alloc,
68 .slave_configure = sas_slave_configure,
69 .slave_destroy = sas_slave_destroy,
70 .change_queue_depth = sas_change_queue_depth,
71 .change_queue_type = sas_change_queue_type,
72 .bios_param = sas_bios_param,
73 .can_queue = 1,
74 .cmd_per_lun = 1,
75 .this_id = -1,
76 .sg_tablesize = SG_ALL,
77 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
78 .use_clustering = ENABLE_CLUSTERING,
79};
80
81static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
82{
83 int err, i;
84 struct asd_ha_addrspace *io_handle;
85
86 asd_ha->iospace = 0;
87 for (i = 0; i < 3; i += 2) {
88 io_handle = &asd_ha->io_handle[i==0?0:1];
89 io_handle->start = pci_resource_start(asd_ha->pcidev, i);
90 io_handle->len = pci_resource_len(asd_ha->pcidev, i);
91 io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
92 err = -ENODEV;
93 if (!io_handle->start || !io_handle->len) {
94 asd_printk("MBAR%d start or length for %s is 0.\n",
95 i==0?0:1, pci_name(asd_ha->pcidev));
96 goto Err;
97 }
98 err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
99 if (err) {
100 asd_printk("couldn't reserve memory region for %s\n",
101 pci_name(asd_ha->pcidev));
102 goto Err;
103 }
104 if (io_handle->flags & IORESOURCE_CACHEABLE)
105 io_handle->addr = ioremap(io_handle->start,
106 io_handle->len);
107 else
108 io_handle->addr = ioremap_nocache(io_handle->start,
109 io_handle->len);
110 if (!io_handle->addr) {
111 asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
112 pci_name(asd_ha->pcidev));
113 goto Err_unreq;
114 }
115 }
116
117 return 0;
118Err_unreq:
119 pci_release_region(asd_ha->pcidev, i);
120Err:
121 if (i > 0) {
122 io_handle = &asd_ha->io_handle[0];
123 iounmap(io_handle->addr);
124 pci_release_region(asd_ha->pcidev, 0);
125 }
126 return err;
127}
128
129static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha)
130{
131 struct asd_ha_addrspace *io_handle;
132
133 io_handle = &asd_ha->io_handle[1];
134 iounmap(io_handle->addr);
135 pci_release_region(asd_ha->pcidev, 2);
136
137 io_handle = &asd_ha->io_handle[0];
138 iounmap(io_handle->addr);
139 pci_release_region(asd_ha->pcidev, 0);
140}
141
142static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
143{
144 int i = PCI_IOBAR_OFFSET, err;
145 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
146
147 asd_ha->iospace = 1;
148 io_handle->start = pci_resource_start(asd_ha->pcidev, i);
149 io_handle->len = pci_resource_len(asd_ha->pcidev, i);
150 io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
151 io_handle->addr = (void __iomem *) io_handle->start;
152 if (!io_handle->start || !io_handle->len) {
153 asd_printk("couldn't get IO ports for %s\n",
154 pci_name(asd_ha->pcidev));
155 return -ENODEV;
156 }
157 err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
158 if (err) {
159 asd_printk("couldn't reserve io space for %s\n",
160 pci_name(asd_ha->pcidev));
161 }
162
163 return err;
164}
165
166static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha)
167{
168 pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
169}
170
171static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha)
172{
173 int err;
174 u16 cmd_reg;
175
176 err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg);
177 if (err) {
178 asd_printk("couldn't read command register of %s\n",
179 pci_name(asd_ha->pcidev));
180 goto Err;
181 }
182
183 err = -ENODEV;
184 if (cmd_reg & PCI_COMMAND_MEMORY) {
185 if ((err = asd_map_memio(asd_ha)))
186 goto Err;
187 } else if (cmd_reg & PCI_COMMAND_IO) {
188 if ((err = asd_map_ioport(asd_ha)))
189 goto Err;
190 asd_printk("%s ioport mapped -- upgrade your hardware\n",
191 pci_name(asd_ha->pcidev));
192 } else {
193 asd_printk("no proper device access to %s\n",
194 pci_name(asd_ha->pcidev));
195 goto Err;
196 }
197
198 return 0;
199Err:
200 return err;
201}
202
203static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha)
204{
205 if (asd_ha->iospace)
206 asd_unmap_ioport(asd_ha);
207 else
208 asd_unmap_memio(asd_ha);
209}
210
211static const char *asd_dev_rev[30] = {
212 [0] = "A0",
213 [1] = "A1",
214 [8] = "B0",
215};
216
217static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
218{
219 int err, i;
220
221 err = pci_read_config_byte(asd_ha->pcidev, PCI_REVISION_ID,
222 &asd_ha->revision_id);
223 if (err) {
224 asd_printk("couldn't read REVISION ID register of %s\n",
225 pci_name(asd_ha->pcidev));
226 goto Err;
227 }
228 err = -ENODEV;
229 if (asd_ha->revision_id < AIC9410_DEV_REV_B0) {
230 asd_printk("%s is revision %s (%X), which is not supported\n",
231 pci_name(asd_ha->pcidev),
232 asd_dev_rev[asd_ha->revision_id],
233 asd_ha->revision_id);
234 goto Err;
235 }
236 /* Provide some sane default values. */
237 asd_ha->hw_prof.max_scbs = 512;
238 asd_ha->hw_prof.max_ddbs = 128;
239 asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
240 /* All phys are enabled, by default. */
241 asd_ha->hw_prof.enabled_phys = 0xFF;
242 for (i = 0; i < ASD_MAX_PHYS; i++) {
243 asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
244 SAS_LINK_RATE_3_0_GBPS;
245 asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
246 SAS_LINK_RATE_1_5_GBPS;
247 asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
248 SAS_LINK_RATE_1_5_GBPS;
249 asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
250 SAS_LINK_RATE_1_5_GBPS;
251 }
252
253 return 0;
254Err:
255 return err;
256}
257
258static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
259{
260 int err = asd_common_setup(asd_ha);
261
262 if (err)
263 return err;
264
265 asd_ha->hw_prof.addr_range = 8;
266 asd_ha->hw_prof.port_name_base = 0;
267 asd_ha->hw_prof.dev_name_base = 8;
268 asd_ha->hw_prof.sata_name_base = 16;
269
270 return 0;
271}
272
273static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha)
274{
275 int err = asd_common_setup(asd_ha);
276
277 if (err)
278 return err;
279
280 asd_ha->hw_prof.addr_range = 4;
281 asd_ha->hw_prof.port_name_base = 0;
282 asd_ha->hw_prof.dev_name_base = 4;
283 asd_ha->hw_prof.sata_name_base = 8;
284
285 return 0;
286}
287
288static ssize_t asd_show_dev_rev(struct device *dev,
289 struct device_attribute *attr, char *buf)
290{
291 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
292 return snprintf(buf, PAGE_SIZE, "%s\n",
293 asd_dev_rev[asd_ha->revision_id]);
294}
295static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
296
297static ssize_t asd_show_dev_bios_build(struct device *dev,
298 struct device_attribute *attr,char *buf)
299{
300 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
301 return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld);
302}
303static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL);
304
305static ssize_t asd_show_dev_pcba_sn(struct device *dev,
306 struct device_attribute *attr, char *buf)
307{
308 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
309 return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn);
310}
311static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
312
313static void asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
314{
315 device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
316 device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
317 device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
318}
319
320static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
321{
322 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
323 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
324 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
325}
326
327/* The first entry, 0, is used for dynamic ids, the rest for devices
328 * we know about.
329 */
330static struct asd_pcidev_struct {
331 const char * name;
332 int (*setup)(struct asd_ha_struct *asd_ha);
333} asd_pcidev_data[] = {
334 /* Id 0 is used for dynamic ids. */
335 { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
336 .setup = asd_aic9410_setup
337 },
338 { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter",
339 .setup = asd_aic9410_setup
340 },
341 { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter",
342 .setup = asd_aic9405_setup
343 },
344};
345
346static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
347{
348 asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool",
349 &asd_ha->pcidev->dev,
350 sizeof(struct scb),
351 8, 0);
352 if (!asd_ha->scb_pool) {
353 asd_printk("couldn't create scb pool\n");
354 return -ENOMEM;
355 }
356
357 return 0;
358}
359
360/**
361 * asd_free_edbs -- free empty data buffers
362 * @asd_ha: pointer to host adapter structure
363 */
364static inline void asd_free_edbs(struct asd_ha_struct *asd_ha)
365{
366 struct asd_seq_data *seq = &asd_ha->seq;
367 int i;
368
369 for (i = 0; i < seq->num_edbs; i++)
370 asd_free_coherent(asd_ha, seq->edb_arr[i]);
371 kfree(seq->edb_arr);
372 seq->edb_arr = NULL;
373}
374
375static inline void asd_free_escbs(struct asd_ha_struct *asd_ha)
376{
377 struct asd_seq_data *seq = &asd_ha->seq;
378 int i;
379
380 for (i = 0; i < seq->num_escbs; i++) {
381 if (!list_empty(&seq->escb_arr[i]->list))
382 list_del_init(&seq->escb_arr[i]->list);
383
384 asd_ascb_free(seq->escb_arr[i]);
385 }
386 kfree(seq->escb_arr);
387 seq->escb_arr = NULL;
388}
389
390static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
391{
392 int i;
393
394 if (asd_ha->hw_prof.ddb_ext)
395 asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext);
396 if (asd_ha->hw_prof.scb_ext)
397 asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
398
399 if (asd_ha->hw_prof.ddb_bitmap)
400 kfree(asd_ha->hw_prof.ddb_bitmap);
401 asd_ha->hw_prof.ddb_bitmap = NULL;
402
403 for (i = 0; i < ASD_MAX_PHYS; i++) {
404 struct asd_phy *phy = &asd_ha->phys[i];
405
406 asd_free_coherent(asd_ha, phy->id_frm_tok);
407 }
408 if (asd_ha->seq.escb_arr)
409 asd_free_escbs(asd_ha);
410 if (asd_ha->seq.edb_arr)
411 asd_free_edbs(asd_ha);
412 if (asd_ha->hw_prof.ue.area) {
413 kfree(asd_ha->hw_prof.ue.area);
414 asd_ha->hw_prof.ue.area = NULL;
415 }
416 if (asd_ha->seq.tc_index_array) {
417 kfree(asd_ha->seq.tc_index_array);
418 kfree(asd_ha->seq.tc_index_bitmap);
419 asd_ha->seq.tc_index_array = NULL;
420 asd_ha->seq.tc_index_bitmap = NULL;
421 }
422 if (asd_ha->seq.actual_dl) {
423 asd_free_coherent(asd_ha, asd_ha->seq.actual_dl);
424 asd_ha->seq.actual_dl = NULL;
425 asd_ha->seq.dl = NULL;
426 }
427 if (asd_ha->seq.next_scb.vaddr) {
428 dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr,
429 asd_ha->seq.next_scb.dma_handle);
430 asd_ha->seq.next_scb.vaddr = NULL;
431 }
432 dma_pool_destroy(asd_ha->scb_pool);
433 asd_ha->scb_pool = NULL;
434}
435
436kmem_cache_t *asd_dma_token_cache;
437kmem_cache_t *asd_ascb_cache;
438
439static int asd_create_global_caches(void)
440{
441 if (!asd_dma_token_cache) {
442 asd_dma_token_cache
443 = kmem_cache_create(ASD_DRIVER_NAME "_dma_token",
444 sizeof(struct asd_dma_tok),
445 0,
446 SLAB_HWCACHE_ALIGN,
447 NULL, NULL);
448 if (!asd_dma_token_cache) {
449 asd_printk("couldn't create dma token cache\n");
450 return -ENOMEM;
451 }
452 }
453
454 if (!asd_ascb_cache) {
455 asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb",
456 sizeof(struct asd_ascb),
457 0,
458 SLAB_HWCACHE_ALIGN,
459 NULL, NULL);
460 if (!asd_ascb_cache) {
461 asd_printk("couldn't create ascb cache\n");
462 goto Err;
463 }
464 }
465
466 return 0;
467Err:
468 kmem_cache_destroy(asd_dma_token_cache);
469 asd_dma_token_cache = NULL;
470 return -ENOMEM;
471}
472
473static void asd_destroy_global_caches(void)
474{
475 if (asd_dma_token_cache)
476 kmem_cache_destroy(asd_dma_token_cache);
477 asd_dma_token_cache = NULL;
478
479 if (asd_ascb_cache)
480 kmem_cache_destroy(asd_ascb_cache);
481 asd_ascb_cache = NULL;
482}
483
484static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
485{
486 int i;
487 struct asd_sas_phy **sas_phys =
488 kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_phy), GFP_KERNEL);
489 struct asd_sas_port **sas_ports =
490 kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_port), GFP_KERNEL);
491
492 if (!sas_phys || !sas_ports) {
493 kfree(sas_phys);
494 kfree(sas_ports);
495 return -ENOMEM;
496 }
497
498 asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
499 asd_ha->sas_ha.lldd_module = THIS_MODULE;
500 asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
501
502 for (i = 0; i < ASD_MAX_PHYS; i++) {
503 sas_phys[i] = &asd_ha->phys[i].sas_phy;
504 sas_ports[i] = &asd_ha->ports[i];
505 }
506
507 asd_ha->sas_ha.sas_phy = sas_phys;
508 asd_ha->sas_ha.sas_port= sas_ports;
509 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
510
511 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
512
513 return sas_register_ha(&asd_ha->sas_ha);
514}
515
516static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
517{
518 int err;
519
520 err = sas_unregister_ha(&asd_ha->sas_ha);
521
522 sas_remove_host(asd_ha->sas_ha.core.shost);
523 scsi_remove_host(asd_ha->sas_ha.core.shost);
524 scsi_host_put(asd_ha->sas_ha.core.shost);
525
526 kfree(asd_ha->sas_ha.sas_phy);
527 kfree(asd_ha->sas_ha.sas_port);
528
529 return err;
530}
531
532static int __devinit asd_pci_probe(struct pci_dev *dev,
533 const struct pci_device_id *id)
534{
535 struct asd_pcidev_struct *asd_dev;
536 unsigned asd_id = (unsigned) id->driver_data;
537 struct asd_ha_struct *asd_ha;
538 struct Scsi_Host *shost;
539 int err;
540
541 if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) {
542 asd_printk("wrong driver_data in PCI table\n");
543 return -ENODEV;
544 }
545
546 if ((err = pci_enable_device(dev))) {
547 asd_printk("couldn't enable device %s\n", pci_name(dev));
548 return err;
549 }
550
551 pci_set_master(dev);
552
553 err = -ENOMEM;
554
555 shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *));
556 if (!shost)
557 goto Err;
558
559 asd_dev = &asd_pcidev_data[asd_id];
560
561 asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL);
562 if (!asd_ha) {
563 asd_printk("out of memory\n");
564 goto Err;
565 }
566 asd_ha->pcidev = dev;
567 asd_ha->sas_ha.pcidev = asd_ha->pcidev;
568 asd_ha->sas_ha.lldd_ha = asd_ha;
569
570 asd_ha->name = asd_dev->name;
571 asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
572
573 SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
574 asd_ha->sas_ha.core.shost = shost;
575 shost->transportt = aic94xx_transport_template;
576 shost->max_id = ~0;
577 shost->max_lun = ~0;
578 shost->max_cmd_len = 16;
579
580 err = scsi_add_host(shost, &dev->dev);
581 if (err) {
582 scsi_host_put(shost);
583 goto Err_free;
584 }
585
586
587
588 err = asd_dev->setup(asd_ha);
589 if (err)
590 goto Err_free;
591
592 err = -ENODEV;
593 if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)
594 && !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK))
595 ;
596 else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)
597 && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK))
598 ;
599 else {
600 asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
601 goto Err_free;
602 }
603
604 pci_set_drvdata(dev, asd_ha);
605
606 err = asd_map_ha(asd_ha);
607 if (err)
608 goto Err_free;
609
610 err = asd_create_ha_caches(asd_ha);
611 if (err)
612 goto Err_unmap;
613
614 err = asd_init_hw(asd_ha);
615 if (err)
616 goto Err_free_cache;
617
618 asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled "
619 "phys, flash %s, BIOS %s%d\n",
620 pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr),
621 asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys,
622 asd_ha->hw_prof.num_phys,
623 asd_ha->hw_prof.flash.present ? "present" : "not present",
624 asd_ha->hw_prof.bios.present ? "build " : "not present",
625 asd_ha->hw_prof.bios.bld);
626
627 shost->can_queue = asd_ha->seq.can_queue;
628
629 if (use_msi)
630 pci_enable_msi(asd_ha->pcidev);
631
632 err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, SA_SHIRQ,
633 ASD_DRIVER_NAME, asd_ha);
634 if (err) {
635 asd_printk("couldn't get irq %d for %s\n",
636 asd_ha->pcidev->irq, pci_name(asd_ha->pcidev));
637 goto Err_irq;
638 }
639 asd_enable_ints(asd_ha);
640
641 err = asd_init_post_escbs(asd_ha);
642 if (err) {
643 asd_printk("couldn't post escbs for %s\n",
644 pci_name(asd_ha->pcidev));
645 goto Err_escbs;
646 }
647 ASD_DPRINTK("escbs posted\n");
648
649 asd_create_dev_attrs(asd_ha);
650
651 err = asd_register_sas_ha(asd_ha);
652 if (err)
653 goto Err_reg_sas;
654
655 err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
656 if (err) {
657 asd_printk("coudln't enable phys, err:%d\n", err);
658 goto Err_en_phys;
659 }
660 ASD_DPRINTK("enabled phys\n");
661 /* give the phy enabling interrupt event time to come in (1s
662 * is empirically about all it takes) */
663 ssleep(1);
664 /* Wait for discovery to finish */
665 scsi_flush_work(asd_ha->sas_ha.core.shost);
666
667 return 0;
668Err_en_phys:
669 asd_unregister_sas_ha(asd_ha);
670Err_reg_sas:
671 asd_remove_dev_attrs(asd_ha);
672Err_escbs:
673 asd_disable_ints(asd_ha);
674 free_irq(dev->irq, asd_ha);
675Err_irq:
676 if (use_msi)
677 pci_disable_msi(dev);
678 asd_chip_hardrst(asd_ha);
679Err_free_cache:
680 asd_destroy_ha_caches(asd_ha);
681Err_unmap:
682 asd_unmap_ha(asd_ha);
683Err_free:
684 kfree(asd_ha);
685 scsi_remove_host(shost);
686Err:
687 pci_disable_device(dev);
688 return err;
689}
690
691static void asd_free_queues(struct asd_ha_struct *asd_ha)
692{
693 unsigned long flags;
694 LIST_HEAD(pending);
695 struct list_head *n, *pos;
696
697 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
698 asd_ha->seq.pending = 0;
699 list_splice_init(&asd_ha->seq.pend_q, &pending);
700 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
701
702 if (!list_empty(&pending))
703 ASD_DPRINTK("Uh-oh! Pending is not empty!\n");
704
705 list_for_each_safe(pos, n, &pending) {
706 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
707 list_del_init(pos);
708 ASD_DPRINTK("freeing from pending\n");
709 asd_ascb_free(ascb);
710 }
711}
712
713static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
714{
715 u8 phy_mask = asd_ha->hw_prof.enabled_phys;
716 u8 i;
717
718 for_each_phy(phy_mask, phy_mask, i) {
719 asd_turn_led(asd_ha, i, 0);
720 asd_control_led(asd_ha, i, 0);
721 }
722}
723
724static void __devexit asd_pci_remove(struct pci_dev *dev)
725{
726 struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
727
728 if (!asd_ha)
729 return;
730
731 asd_unregister_sas_ha(asd_ha);
732
733 asd_disable_ints(asd_ha);
734
735 asd_remove_dev_attrs(asd_ha);
736
737 /* XXX more here as needed */
738
739 free_irq(dev->irq, asd_ha);
740 if (use_msi)
741 pci_disable_msi(asd_ha->pcidev);
742 asd_turn_off_leds(asd_ha);
743 asd_chip_hardrst(asd_ha);
744 asd_free_queues(asd_ha);
745 asd_destroy_ha_caches(asd_ha);
746 asd_unmap_ha(asd_ha);
747 kfree(asd_ha);
748 pci_disable_device(dev);
749 return;
750}
751
752static ssize_t asd_version_show(struct device_driver *driver, char *buf)
753{
754 return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
755}
756static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
757
758static void asd_create_driver_attrs(struct device_driver *driver)
759{
760 driver_create_file(driver, &driver_attr_version);
761}
762
763static void asd_remove_driver_attrs(struct device_driver *driver)
764{
765 driver_remove_file(driver, &driver_attr_version);
766}
767
768static struct sas_domain_function_template aic94xx_transport_functions = {
769 .lldd_port_formed = asd_update_port_links,
770
771 .lldd_dev_found = asd_dev_found,
772 .lldd_dev_gone = asd_dev_gone,
773
774 .lldd_execute_task = asd_execute_task,
775
776 .lldd_abort_task = asd_abort_task,
777 .lldd_abort_task_set = asd_abort_task_set,
778 .lldd_clear_aca = asd_clear_aca,
779 .lldd_clear_task_set = asd_clear_task_set,
780 .lldd_I_T_nexus_reset = NULL,
781 .lldd_lu_reset = asd_lu_reset,
782 .lldd_query_task = asd_query_task,
783
784 .lldd_clear_nexus_port = asd_clear_nexus_port,
785 .lldd_clear_nexus_ha = asd_clear_nexus_ha,
786
787 .lldd_control_phy = asd_control_phy,
788};
789
790static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
791 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR10),
792 0, 0, 1},
793 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR12),
794 0, 0, 1},
795 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1E),
796 0, 0, 1},
797 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR30),
798 0, 0, 2},
799 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR32),
800 0, 0, 2},
801 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3E),
802 0, 0, 2},
803 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3F),
804 0, 0, 2},
805 {}
806};
807
808MODULE_DEVICE_TABLE(pci, aic94xx_pci_table);
809
810static struct pci_driver aic94xx_pci_driver = {
811 .name = ASD_DRIVER_NAME,
812 .id_table = aic94xx_pci_table,
813 .probe = asd_pci_probe,
814 .remove = __devexit_p(asd_pci_remove),
815};
816
817static int __init aic94xx_init(void)
818{
819 int err;
820
821
822 asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION,
823 ASD_DRIVER_VERSION);
824
825 err = asd_create_global_caches();
826 if (err)
827 return err;
828
829 aic94xx_transport_template =
830 sas_domain_attach_transport(&aic94xx_transport_functions);
831 if (!aic94xx_transport_template)
832 goto out_destroy_caches;
833
834 err = pci_register_driver(&aic94xx_pci_driver);
835 if (err)
836 goto out_release_transport;
837
838 asd_create_driver_attrs(&aic94xx_pci_driver.driver);
839
840 return err;
841
842 out_release_transport:
843 sas_release_transport(aic94xx_transport_template);
844 out_destroy_caches:
845 asd_destroy_global_caches();
846
847 return err;
848}
849
850static void __exit aic94xx_exit(void)
851{
852 asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
853 pci_unregister_driver(&aic94xx_pci_driver);
854 sas_release_transport(aic94xx_transport_template);
855 asd_destroy_global_caches();
856 asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
857 ASD_DRIVER_VERSION);
858}
859
860module_init(aic94xx_init);
861module_exit(aic94xx_exit);
862
863MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
864MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION);
865MODULE_LICENSE("GPL v2");
866MODULE_VERSION(ASD_DRIVER_VERSION);
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c
new file mode 100644
index 000000000000..f210dac3203d
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.c
@@ -0,0 +1,332 @@
1/*
2 * Aic94xx SAS/SATA driver register access.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28#include "aic94xx_reg.h"
29#include "aic94xx.h"
30
31/* Writing to device address space.
32 * Offset comes before value as a reminder that the operation of
33 * this function is *offs = val.
34 */
35static inline void asd_write_byte(struct asd_ha_struct *asd_ha,
36 unsigned long offs, u8 val)
37{
38 if (unlikely(asd_ha->iospace))
39 outb(val,
40 (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
41 else
42 writeb(val, asd_ha->io_handle[0].addr + offs);
43 wmb();
44}
45
46static inline void asd_write_word(struct asd_ha_struct *asd_ha,
47 unsigned long offs, u16 val)
48{
49 if (unlikely(asd_ha->iospace))
50 outw(val,
51 (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
52 else
53 writew(val, asd_ha->io_handle[0].addr + offs);
54 wmb();
55}
56
57static inline void asd_write_dword(struct asd_ha_struct *asd_ha,
58 unsigned long offs, u32 val)
59{
60 if (unlikely(asd_ha->iospace))
61 outl(val,
62 (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
63 else
64 writel(val, asd_ha->io_handle[0].addr + offs);
65 wmb();
66}
67
68/* Reading from device address space.
69 */
70static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha,
71 unsigned long offs)
72{
73 u8 val;
74 if (unlikely(asd_ha->iospace))
75 val = inb((unsigned long) asd_ha->io_handle[0].addr
76 + (offs & 0xFF));
77 else
78 val = readb(asd_ha->io_handle[0].addr + offs);
79 rmb();
80 return val;
81}
82
83static inline u16 asd_read_word(struct asd_ha_struct *asd_ha,
84 unsigned long offs)
85{
86 u16 val;
87 if (unlikely(asd_ha->iospace))
88 val = inw((unsigned long)asd_ha->io_handle[0].addr
89 + (offs & 0xFF));
90 else
91 val = readw(asd_ha->io_handle[0].addr + offs);
92 rmb();
93 return val;
94}
95
96static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha,
97 unsigned long offs)
98{
99 u32 val;
100 if (unlikely(asd_ha->iospace))
101 val = inl((unsigned long) asd_ha->io_handle[0].addr
102 + (offs & 0xFF));
103 else
104 val = readl(asd_ha->io_handle[0].addr + offs);
105 rmb();
106 return val;
107}
108
109static inline u32 asd_mem_offs_swa(void)
110{
111 return 0;
112}
113
114static inline u32 asd_mem_offs_swc(void)
115{
116 return asd_mem_offs_swa() + MBAR0_SWA_SIZE;
117}
118
119static inline u32 asd_mem_offs_swb(void)
120{
121 return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20;
122}
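(The extra 0x20 skipped between SWC and SWB corresponds to the copy of PCI configuration space at MBAR0 offsets 0x60-0x7F; the sliding-window layout comment further down spells this out.)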
123
124/* We know that the register wanted is in the range
125 * of the sliding window.
126 */
127#define ASD_READ_SW(ww, type, ord) \
128static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\
129 u32 reg) \
130{ \
131 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
132 u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
133 return asd_read_##ord (asd_ha, (unsigned long) map_offs); \
134}
135
136#define ASD_WRITE_SW(ww, type, ord) \
137static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\
138 u32 reg, type val) \
139{ \
140 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
141 u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
142 asd_write_##ord (asd_ha, (unsigned long) map_offs, val); \
143}
144
145ASD_READ_SW(swa, u8, byte);
146ASD_READ_SW(swa, u16, word);
147ASD_READ_SW(swa, u32, dword);
148
149ASD_READ_SW(swb, u8, byte);
150ASD_READ_SW(swb, u16, word);
151ASD_READ_SW(swb, u32, dword);
152
153ASD_READ_SW(swc, u8, byte);
154ASD_READ_SW(swc, u16, word);
155ASD_READ_SW(swc, u32, dword);
156
157ASD_WRITE_SW(swa, u8, byte);
158ASD_WRITE_SW(swa, u16, word);
159ASD_WRITE_SW(swa, u32, dword);
160
161ASD_WRITE_SW(swb, u8, byte);
162ASD_WRITE_SW(swb, u16, word);
163ASD_WRITE_SW(swb, u32, dword);
164
165ASD_WRITE_SW(swc, u8, byte);
166ASD_WRITE_SW(swc, u16, word);
167ASD_WRITE_SW(swc, u32, dword);
168
169/*
170 * A word about sliding windows:
171 * MBAR0 is divided into sliding windows A, C and B, in that order.
172 * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes.
173 * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes.
174 * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F.
175 * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0.
176 * See asd_init_sw() in aic94xx_hwi.c
177 *
178 * We map the most commonly accessed registers of the internal 4GB
179 * host adapter memory space. If a register/internal memory location
180 * is wanted which is not mapped, we slide SWB by paging it;
181 * see asd_move_swb() in aic94xx_reg.c.
182 */
183
184/**
185 * asd_move_swb -- move sliding window B
186 * @asd_ha: pointer to host adapter structure
187 * @reg: register desired to be within range of the new window
188 */
189static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
190{
191 u32 base = reg & ~(MBAR0_SWB_SIZE-1);
192 pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base);
193 asd_ha->io_handle[0].swb_base = base;
194}
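To make the paging arithmetic concrete, a sketch with an assumed window size (MBAR0_SWB_SIZE is a runtime variable; 0x1000 below is only an illustrative value):

/* Sketch: assuming MBAR0_SWB_SIZE == 0x1000, a register in the EXSI
 * block pages SWB to the 4K-aligned base that covers it:
 *
 *   reg  = REG_BASE_ADDR_EXSI + 4   = 0xB8042804
 *   base = reg & ~(0x1000 - 1)      = 0xB8042000
 *
 * after which 0xB8042000..0xB8042FFF is reachable through SWB
 * without further paging.
 */
asd_move_swb(asd_ha, REG_BASE_ADDR_EXSI + 4);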
195
196static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val)
197{
198 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
199 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
200 if (io_handle->swa_base <= reg
201 && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
202 asd_write_swa_byte (asd_ha, reg,val);
203 else if (io_handle->swb_base <= reg
204 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
205 asd_write_swb_byte (asd_ha, reg, val);
206 else if (io_handle->swc_base <= reg
207 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
208 asd_write_swc_byte (asd_ha, reg, val);
209 else {
210 /* Ok, we have to move SWB */
211 asd_move_swb(asd_ha, reg);
212 asd_write_swb_byte (asd_ha, reg, val);
213 }
214}
215
216#define ASD_WRITE_REG(type, ord) \
217void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\
218{ \
219 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
220 unsigned long flags; \
221 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
222 spin_lock_irqsave(&asd_ha->iolock, flags); \
223 if (io_handle->swa_base <= reg \
224 && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
225 asd_write_swa_##ord (asd_ha, reg,val); \
226 else if (io_handle->swb_base <= reg \
227 && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
228 asd_write_swb_##ord (asd_ha, reg, val); \
229 else if (io_handle->swc_base <= reg \
230 && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
231 asd_write_swc_##ord (asd_ha, reg, val); \
232 else { \
233 /* Ok, we have to move SWB */ \
234 asd_move_swb(asd_ha, reg); \
235 asd_write_swb_##ord (asd_ha, reg, val); \
236 } \
237 spin_unlock_irqrestore(&asd_ha->iolock, flags); \
238}
239
240ASD_WRITE_REG(u8, byte);
241ASD_WRITE_REG(u16,word);
242ASD_WRITE_REG(u32,dword);
243
244static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg)
245{
246 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
247 u8 val;
248 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
249 if (io_handle->swa_base <= reg
250 && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
251 val = asd_read_swa_byte (asd_ha, reg);
252 else if (io_handle->swb_base <= reg
253 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
254 val = asd_read_swb_byte (asd_ha, reg);
255 else if (io_handle->swc_base <= reg
256 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
257 val = asd_read_swc_byte (asd_ha, reg);
258 else {
259 /* Ok, we have to move SWB */
260 asd_move_swb(asd_ha, reg);
261 val = asd_read_swb_byte (asd_ha, reg);
262 }
263 return val;
264}
265
266#define ASD_READ_REG(type, ord) \
267type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \
268{ \
269 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
270 type val; \
271 unsigned long flags; \
272 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
273 spin_lock_irqsave(&asd_ha->iolock, flags); \
274 if (io_handle->swa_base <= reg \
275 && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
276 val = asd_read_swa_##ord (asd_ha, reg); \
277 else if (io_handle->swb_base <= reg \
278 && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
279 val = asd_read_swb_##ord (asd_ha, reg); \
280 else if (io_handle->swc_base <= reg \
281 && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
282 val = asd_read_swc_##ord (asd_ha, reg); \
283 else { \
284 /* Ok, we have to move SWB */ \
285 asd_move_swb(asd_ha, reg); \
286 val = asd_read_swb_##ord (asd_ha, reg); \
287 } \
288 spin_unlock_irqrestore(&asd_ha->iolock, flags); \
289 return val; \
290}
291
292ASD_READ_REG(u8, byte);
293ASD_READ_REG(u16,word);
294ASD_READ_REG(u32,dword);
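Instantiating these macros yields exactly the accessors declared in aic94xx_reg.h: for example, ASD_READ_REG(u8, byte) expands to u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg), which selects SWA, SWB or SWC by address range, slides SWB when the register falls outside all three, and serializes everything under asd_ha->iolock; likewise for the word and dword variants.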
295
296/**
297 * asd_read_reg_string -- read a string of bytes from io space memory
298 * @asd_ha: pointer to host adapter structure
299 * @dst: pointer to a destination buffer where data will be written to
300 * @offs: start offset (register) to read from
301 * @count: number of bytes to read
302 */
303void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
304 u32 offs, int count)
305{
306 u8 *p = dst;
307 unsigned long flags;
308
309 spin_lock_irqsave(&asd_ha->iolock, flags);
310 for ( ; count > 0; count--, offs++, p++)
311 *p = __asd_read_reg_byte(asd_ha, offs);
312 spin_unlock_irqrestore(&asd_ha->iolock, flags);
313}
314
315/**
316 * asd_write_reg_string -- write a string of bytes to io space memory
317 * @asd_ha: pointer to host adapter structure
318 * @src: pointer to source buffer where data will be read from
319 * @offs: start offset (register) to write to
320 * @count: number of bytes to write
321 */
322void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
323 u32 offs, int count)
324{
325 u8 *p = src;
326 unsigned long flags;
327
328 spin_lock_irqsave(&asd_ha->iolock, flags);
329 for ( ; count > 0; count--, offs++, p++)
330 __asd_write_reg_byte(asd_ha, offs, *p);
331 spin_unlock_irqrestore(&asd_ha->iolock, flags);
332}
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h
new file mode 100644
index 000000000000..2279307fd27e
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.h
@@ -0,0 +1,302 @@
1/*
2 * Aic94xx SAS/SATA driver hardware registers definitions.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_REG_H_
28#define _AIC94XX_REG_H_
29
30#include <asm/io.h>
31#include "aic94xx_hwi.h"
32
33/* Values */
34#define AIC9410_DEV_REV_B0 0x8
35
36/* MBAR0, SWA, SWB, SWC, internal memory space addresses */
37#define REG_BASE_ADDR 0xB8000000
38#define REG_BASE_ADDR_CSEQCIO 0xB8002000
39#define REG_BASE_ADDR_EXSI 0xB8042800
40
41#define MBAR0_SWA_SIZE 0x58
42extern u32 MBAR0_SWB_SIZE;
43#define MBAR0_SWC_SIZE 0x8
44
45/* MBAR1, points to On Chip Memory */
46#define OCM_BASE_ADDR 0xA0000000
47#define OCM_MAX_SIZE 0x20000
48
49/* Smallest address possible to reference */
50#define ALL_BASE_ADDR OCM_BASE_ADDR
51
52/* PCI configuration space registers */
53#define PCI_IOBAR_OFFSET 4
54
55#define PCI_CONF_MBAR1 0x6C
56#define PCI_CONF_MBAR0_SWA 0x70
57#define PCI_CONF_MBAR0_SWB 0x74
58#define PCI_CONF_MBAR0_SWC 0x78
59#define PCI_CONF_MBAR_KEY 0x7C
60#define PCI_CONF_FLSH_BAR 0xB8
61
62#include "aic94xx_reg_def.h"
63
64u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg);
65u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg);
66u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg);
67
68void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val);
69void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val);
70void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val);
71
72void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
73 u32 offs, int count);
74void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
75 u32 offs, int count);
76
77#define ASD_READ_OCM(type, ord, S) \
78static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \
79 u32 offs) \
80{ \
81 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
82 type val = read##S (io_handle->addr + (unsigned long) offs); \
83 rmb(); \
84 return val; \
85}
86
87ASD_READ_OCM(u8, byte, b);
88ASD_READ_OCM(u16,word, w);
89ASD_READ_OCM(u32,dword,l);
90
91#define ASD_WRITE_OCM(type, ord, S) \
92static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \
93 u32 offs, type val) \
94{ \
95 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
96 write##S (val, io_handle->addr + (unsigned long) offs); \
97 return; \
98}
99
100ASD_WRITE_OCM(u8, byte, b);
101ASD_WRITE_OCM(u16,word, w);
102ASD_WRITE_OCM(u32,dword,l);
103
104#define ASD_DDBSITE_READ(type, ord) \
105static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \
106 u16 ddb_site_no, \
107 u16 offs) \
108{ \
109 asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
110 asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
111 return asd_read_reg_##ord (asd_ha, CTXACCESS); \
112}
113
114ASD_DDBSITE_READ(u32, dword);
115ASD_DDBSITE_READ(u16, word);
116
117static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha,
118 u16 ddb_site_no,
119 u16 offs)
120{
121 if (offs & 1)
122 return asd_ddbsite_read_word(asd_ha, ddb_site_no,
123 offs & ~1) >> 8;
124 else
125 return asd_ddbsite_read_word(asd_ha, ddb_site_no,
126 offs) & 0xFF;
127}
128
129
130#define ASD_DDBSITE_WRITE(type, ord) \
131static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \
132 u16 ddb_site_no, \
133 u16 offs, type val) \
134{ \
135 asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
136 asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
137 asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
138}
139
140ASD_DDBSITE_WRITE(u32, dword);
141ASD_DDBSITE_WRITE(u16, word);
142
143static inline void asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha,
144 u16 ddb_site_no,
145 u16 offs, u8 val)
146{
147 u16 base = offs & ~1;
148 u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
149 if (offs & 1)
150 rval = (val << 8) | (rval & 0xFF);
151 else
152 rval = (rval & 0xFF00) | val;
153 asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval);
154}
155
156
157#define ASD_SCBSITE_READ(type, ord) \
158static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \
159 u16 scb_site_no, \
160 u16 offs) \
161{ \
162 asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
163 asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
164 return asd_read_reg_##ord (asd_ha, CTXACCESS); \
165}
166
167ASD_SCBSITE_READ(u32, dword);
168ASD_SCBSITE_READ(u16, word);
169
170static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha,
171 u16 scb_site_no,
172 u16 offs)
173{
174 if (offs & 1)
175 return asd_scbsite_read_word(asd_ha, scb_site_no,
176 offs & ~1) >> 8;
177 else
178 return asd_scbsite_read_word(asd_ha, scb_site_no,
179 offs) & 0xFF;
180}
181
182
183#define ASD_SCBSITE_WRITE(type, ord) \
184static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \
185 u16 scb_site_no, \
186 u16 offs, type val) \
187{ \
188 asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
189 asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
190 asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
191}
192
193ASD_SCBSITE_WRITE(u32, dword);
194ASD_SCBSITE_WRITE(u16, word);
195
196static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha,
197 u16 scb_site_no,
198 u16 offs, u8 val)
199{
200 u16 base = offs & ~1;
201 u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base);
202 if (offs & 1)
203 rval = (val << 8) | (rval & 0xFF);
204 else
205 rval = (rval & 0xFF00) | val;
206 asd_scbsite_write_word(asd_ha, scb_site_no, base, rval);
207}
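/*
 * All four site accessor families above share the same indirection:
 * ALTCIOADR selects the CIO window (MnDDB_SITE or MnSCB_SITE plus the
 * byte offset), ADDBPTR/ASCBPTR selects the site number, and the data
 * then moves through CTXACCESS.  A hypothetical caller fetching the
 * first dword of DDB site 3 would thus simply do:
 *
 *	u32 w = asd_ddbsite_read_dword(asd_ha, 3, 0);
 */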
208
209/**
210 * asd_ddbsite_update_word -- atomically update a word in a ddb site
211 * @asd_ha: pointer to host adapter structure
212 * @ddb_site_no: the DDB site number
213 * @offs: the offset into the DDB
214 * @oldval: old value found in that offset
215 * @newval: the new value to replace it
216 *
217 * This function is used when the sequencers are running and we need to
218 * update a DDB site atomically without expensive pausing and unpausing
219 * of the sequencers and accessing the DDB site through the CIO bus.
220 *
221 * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value
222 * is different from the current value at that offset.
223 */
224static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha,
225 u16 ddb_site_no, u16 offs,
226 u16 oldval, u16 newval)
227{
228 u8 done;
229 u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs);
230 if (oval != oldval)
231 return -EAGAIN;
232 asd_write_reg_word(asd_ha, AOLDDATA, oldval);
233 asd_write_reg_word(asd_ha, ANEWDATA, newval);
234 do {
235 done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL);
236 } while (!(done & ATOMICDONE));
237 if (done & ATOMICERR)
238 return -EFAULT; /* parity error */
239 else if (done & ATOMICWIN)
240 return 0; /* success */
241 else
242 return -EAGAIN; /* oldval differs from current value */
243}
244
245static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha,
246 u16 ddb_site_no, u16 offs,
247 u8 _oldval, u8 _newval)
248{
249 u16 base = offs & ~1;
250 u16 oval;
251 u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
252 if (offs & 1) {
253 if ((nval >> 8) != _oldval)
254 return -EAGAIN;
255 nval = (_newval << 8) | (nval & 0xFF);
256 oval = (_oldval << 8) | (nval & 0xFF);
257 } else {
258 if ((nval & 0xFF) != _oldval)
259 return -EAGAIN;
260 nval = (nval & 0xFF00) | _newval;
261 oval = (nval & 0xFF00) | _oldval;
262 }
263 return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval);
264}
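/*
 * A minimal retry sketch around the atomic update; 'site', 'offs' and
 * 'newval' are placeholders.  -EAGAIN only means another agent won the
 * race, so the caller re-reads and retries:
 *
 *	u16 cur;
 *	do {
 *		cur = asd_ddbsite_read_word(asd_ha, site, offs);
 *	} while (asd_ddbsite_update_word(asd_ha, site, offs,
 *					 cur, newval) == -EAGAIN);
 */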
265
266static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg,
267 dma_addr_t dma_handle)
268{
269 asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle));
270 asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle));
271}
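/*
 * A 64-bit bus address is thus programmed as two consecutive dwords:
 * the low half at 'reg' and the high half at 'reg' + 4.
 */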
272
273static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha)
274{
275 /* DCHREVISION returns 0, possibly broken */
276 u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
277 return ctxmemsize ? 65536 : 32768;
278}
279
280static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha)
281{
282 u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
283 return ctxmemsize ? 8192 : 4096;
284}
285
286static inline void asd_disable_ints(struct asd_ha_struct *asd_ha)
287{
288 asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
289}
290
291static inline void asd_enable_ints(struct asd_ha_struct *asd_ha)
292{
293 /* Enable COM SAS error interrupts (COMSTAT) */
294 asd_write_reg_dword(asd_ha, COMSTATEN,
295 EN_CSBUFPERR | EN_CSERR | EN_OVLYERR);
296 /* Enable DCH SAS CFIFTOERR */
297 asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR);
298 /* Enable Host Device interrupts */
299 asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN);
300}
301
302#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
new file mode 100644
index 000000000000..b79f45f3ad47
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -0,0 +1,2398 @@
1/*
2 * Aic94xx SAS/SATA driver hardware register definitions.
3 *
4 * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * Luben Tuikov: Some register value updates to make it work with the
9 * window-agnostic register r/w functions. Some register corrections, sizes,
10 * etc.
11 *
12 * This file is licensed under GPLv2.
13 *
14 * This file is part of the aic94xx driver.
15 *
16 * The aic94xx driver is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; version 2 of the
19 * License.
20 *
21 * The aic94xx driver is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with the aic94xx driver; if not, write to the Free Software
28 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 *
30 * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $
31 *
32 */
33
34#ifndef _ADP94XX_REG_DEF_H_
35#define _ADP94XX_REG_DEF_H_
36
37/*
38 * Common definitions.
39 */
40#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */
41#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */
42#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */
43
44/********************* COM_SAS register definitions *************************/
45
46/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h.
47 */
48
49/*
50 * CHIM Registers, Address Range : (0x00-0xFF)
51 */
52#define COMBIST (REG_BASE_ADDR + 0x00)
53
54/* bits 31:24 */
55#define L7BLKRST 0x80000000
56#define L6BLKRST 0x40000000
57#define L5BLKRST 0x20000000
58#define L4BLKRST 0x10000000
59#define L3BLKRST 0x08000000
60#define L2BLKRST 0x04000000
61#define L1BLKRST 0x02000000
62#define L0BLKRST 0x01000000
63#define LmBLKRST 0xFF000000
64#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid))
65
66#define OCMBLKRST 0x00400000
67#define CTXMEMBLKRST 0x00200000
68#define CSEQBLKRST 0x00100000
69#define EXSIBLKRST 0x00040000
70#define DPIBLKRST 0x00020000
71#define DFIFBLKRST 0x00010000
72#define HARDRST 0x00000200
73#define COMBLKRST 0x00000100
74#define FRCDFPERR 0x00000080
75#define FRCCIOPERR 0x00000020
76#define FRCBISTERR 0x00000010
77#define COMBISTEN 0x00000004
78#define COMBISTDONE 0x00000002 /* ro */
79#define COMBISTFAIL 0x00000001 /* ro */
80
81#define COMSTAT (REG_BASE_ADDR + 0x04)
82
83#define REQMBXREAD 0x00000040
84#define RSPMBXAVAIL 0x00000020
85#define CSBUFPERR 0x00000008
86#define OVLYERR 0x00000004
87#define CSERR 0x00000002
88#define OVLYDMADONE 0x00000001
89
90#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \
91 CSBUFPERR | OVLYERR | CSERR |\
92 OVLYDMADONE)
93
94#define COMSTATEN (REG_BASE_ADDR + 0x08)
95
96#define EN_REQMBXREAD 0x00000040
97#define EN_RSPMBXAVAIL 0x00000020
98#define EN_CSBUFPERR 0x00000008
99#define EN_OVLYERR 0x00000004
100#define EN_CSERR 0x00000002
101#define EN_OVLYDONE 0x00000001
102
103#define SCBPRO (REG_BASE_ADDR + 0x0C)
104
105#define SCBCONS_MASK 0xFFFF0000
106#define SCBPRO_MASK 0x0000FFFF
107
108#define CHIMREQMBX (REG_BASE_ADDR + 0x10)
109
110#define CHIMRSPMBX (REG_BASE_ADDR + 0x14)
111
112#define CHIMINT (REG_BASE_ADDR + 0x18)
113
114#define EXT_INT0 0x00000800
115#define EXT_INT1 0x00000400
116#define PORRSTDET 0x00000200
117#define HARDRSTDET 0x00000100
118#define DLAVAILQ 0x00000080 /* ro */
119#define HOSTERR 0x00000040
120#define INITERR 0x00000020
121#define DEVINT 0x00000010
122#define COMINT 0x00000008
123#define DEVTIMER2 0x00000004
124#define DEVTIMER1 0x00000002
125#define DLAVAIL 0x00000001
126
127#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\
128 DEVTIMER2 | DEVTIMER1 | DLAVAIL)
129
130#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT)
131
132#define CHIMINTEN (REG_BASE_ADDR + 0x1C)
133
134#define RST_EN_EXT_INT1 0x01000000
135#define RST_EN_EXT_INT0 0x00800000
136#define RST_EN_HOSTERR 0x00400000
137#define RST_EN_INITERR 0x00200000
138#define RST_EN_DEVINT 0x00100000
139#define RST_EN_COMINT 0x00080000
140#define RST_EN_DEVTIMER2 0x00040000
141#define RST_EN_DEVTIMER1 0x00020000
142#define RST_EN_DLAVAIL 0x00010000
143#define SET_EN_EXT_INT1 0x00000100
144#define SET_EN_EXT_INT0 0x00000080
145#define SET_EN_HOSTERR 0x00000040
146#define SET_EN_INITERR 0x00000020
147#define SET_EN_DEVINT 0x00000010
148#define SET_EN_COMINT 0x00000008
149#define SET_EN_DEVTIMER2 0x00000004
150#define SET_EN_DEVTIMER1 0x00000002
151#define SET_EN_DLAVAIL 0x00000001
152
153#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \
154 RST_EN_DEVINT | RST_EN_COMINT | \
155 RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\
156 RST_EN_DLAVAIL)
157
158#define SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\
159 SET_EN_DEVINT | SET_EN_COMINT |\
160 SET_EN_DLAVAIL)
161
162#define OVLYDMACTL (REG_BASE_ADDR + 0x20)
163
164#define OVLYADR_MASK 0x07FF0000
165#define OVLYLSEQ_MASK 0x0000FF00
166#define OVLYCSEQ 0x00000080
167#define OVLYHALTERR 0x00000040
168#define PIOCMODE 0x00000020
169#define RESETOVLYDMA 0x00000008 /* wo */
170#define STARTOVLYDMA 0x00000004
171#define STOPOVLYDMA 0x00000002 /* wo */
172#define OVLYDMAACT 0x00000001 /* ro */
173
174#define OVLYDMACNT (REG_BASE_ADDR + 0x24)
175
176#define OVLYDOMAIN1 0x20000000 /* ro */
177#define OVLYDOMAIN0 0x10000000
178#define OVLYBUFADR_MASK 0x007F0000
179#define OVLYDMACNT_MASK 0x00003FFF
180
181#define OVLYDMAADR (REG_BASE_ADDR + 0x28)
182
183#define DMAERR (REG_BASE_ADDR + 0x30)
184
185#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */
186#define CSERRSTAT_MASK 0x000000FF /* ro */
187
188#define SPIODATA (REG_BASE_ADDR + 0x34)
189
190/* 0x38 - 0x3C are reserved */
191
192#define T1CNTRLR (REG_BASE_ADDR + 0x40)
193
194#define T1DONE 0x00010000 /* ro */
195#define TIMER64 0x00000400
196#define T1ENABLE 0x00000200
197#define T1RELOAD 0x00000100
198#define T1PRESCALER_MASK 0x00000003
199
200#define T1CMPR (REG_BASE_ADDR + 0x44)
201
202#define T1CNTR (REG_BASE_ADDR + 0x48)
203
204#define T2CNTRLR (REG_BASE_ADDR + 0x4C)
205
206#define T2DONE 0x00010000 /* ro */
207#define T2ENABLE 0x00000200
208#define T2RELOAD 0x00000100
209#define T2PRESCALER_MASK 0x00000003
210
211#define T2CMPR (REG_BASE_ADDR + 0x50)
212
213#define T2CNTR (REG_BASE_ADDR + 0x54)
214
215/* 0x58 - 0xFC are reserved */
216
217/*
218 * DCH_SAS Registers, Address Range : (0x800-0xFFF)
219 */
220#define CMDCTXBASE (REG_BASE_ADDR + 0x800)
221
222#define DEVCTXBASE (REG_BASE_ADDR + 0x808)
223
224#define CTXDOMAIN (REG_BASE_ADDR + 0x810)
225
226#define DEVCTXDOMAIN1 0x00000008 /* ro */
227#define DEVCTXDOMAIN0 0x00000004
228#define CMDCTXDOMAIN1 0x00000002 /* ro */
229#define CMDCTXDOMAIN0 0x00000001
230
231#define DCHCTL (REG_BASE_ADDR + 0x814)
232
233#define OCMBISTREPAIR 0x00080000
234#define OCMBISTEN 0x00040000
235#define OCMBISTDN 0x00020000 /* ro */
236#define OCMBISTFAIL 0x00010000 /* ro */
237#define DDBBISTEN 0x00004000
238#define DDBBISTDN 0x00002000 /* ro */
239#define DDBBISTFAIL 0x00001000 /* ro */
240#define SCBBISTEN 0x00000400
241#define SCBBISTDN 0x00000200 /* ro */
242#define SCBBISTFAIL 0x00000100 /* ro */
243
244#define MEMSEL_MASK 0x000000E0
245#define MEMSEL_CCM_LSEQ 0x00000000
246#define MEMSEL_CCM_IOP 0x00000020
247#define MEMSEL_CCM_SASCTL 0x00000040
248#define MEMSEL_DCM_LSEQ 0x00000060
249#define MEMSEL_DCM_IOP 0x00000080
250#define MEMSEL_OCM 0x000000A0
251
252#define FRCERR 0x00000010
253#define AUTORLS 0x00000001
254
255#define DCHREVISION (REG_BASE_ADDR + 0x818)
256
257#define DCHREVISION_MASK 0x000000FF
258
259#define DCHSTATUS (REG_BASE_ADDR + 0x81C)
260
261#define EN_CFIFTOERR 0x00020000
262#define CFIFTOERR 0x00000200
263#define CSEQINT 0x00000100 /* ro */
264#define LSEQ7INT 0x00000080 /* ro */
265#define LSEQ6INT 0x00000040 /* ro */
266#define LSEQ5INT 0x00000020 /* ro */
267#define LSEQ4INT 0x00000010 /* ro */
268#define LSEQ3INT 0x00000008 /* ro */
269#define LSEQ2INT 0x00000004 /* ro */
270#define LSEQ1INT 0x00000002 /* ro */
271#define LSEQ0INT 0x00000001 /* ro */
272
273#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\
274 LSEQ4INT | LSEQ3INT | LSEQ2INT |\
275 LSEQ1INT | LSEQ0INT)
276
277#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820)
278#define ENFAIRMST 0x00FF0000
279#define DISWRMST9 0x00000200
280#define DISWRMST8 0x00000100
281#define DISRDMST 0x000000FF
282
283#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824)
284/* 8 bit wide */
285#define AUTOINC 0x80
286#define ATOMICERR 0x04
287#define ATOMICWIN 0x02
288#define ATOMICDONE 0x01
289
290
291#define ALTCIOADR (REG_BASE_ADDR + 0x828)
292/* 16 bit; bits 8:0 define CIO addr space of CSEQ */
293
294#define ASCBPTR (REG_BASE_ADDR + 0x82C)
295/* 16 bit wide */
296
297#define ADDBPTR (REG_BASE_ADDR + 0x82E)
298/* 16 bit wide */
299
300#define ANEWDATA (REG_BASE_ADDR + 0x830)
301/* 16 bit */
302
303#define AOLDDATA (REG_BASE_ADDR + 0x834)
304/* 16 bit */
305
306#define CTXACCESS (REG_BASE_ADDR + 0x838)
307/* 32 bit */
308
309/* 0x83C - 0xFFC are reserved */
310
311/*
312 * ARP2 External Processor Registers, Address Range : (0x00-0x1F)
313 */
314#define ARP2CTL 0x00
315
316#define FRCSCRPERR 0x00040000
317#define FRCARP2PERR 0x00020000
318#define FRCARP2ILLOPC 0x00010000
319#define ENWAITTO 0x00008000
320#define PERRORDIS 0x00004000
321#define FAILDIS 0x00002000
322#define CIOPERRDIS 0x00001000
323#define BREAKEN3 0x00000800
324#define BREAKEN2 0x00000400
325#define BREAKEN1 0x00000200
326#define BREAKEN0 0x00000100
327#define EPAUSE 0x00000008
328#define PAUSED 0x00000004 /* ro */
329#define STEP 0x00000002
330#define ARP2RESET 0x00000001 /* wo */
331
332#define ARP2INT 0x04
333
334#define HALTCODE_MASK 0x00FF0000 /* ro */
335#define ARP2WAITTO 0x00000100
336#define ARP2HALTC 0x00000080
337#define ARP2ILLOPC 0x00000040
338#define ARP2PERR 0x00000020
339#define ARP2CIOPERR 0x00000010
340#define ARP2BREAK3 0x00000008
341#define ARP2BREAK2 0x00000004
342#define ARP2BREAK1 0x00000002
343#define ARP2BREAK0 0x00000001
344
345#define ARP2INTEN 0x08
346
347#define EN_ARP2WAITTO 0x00000100
348#define EN_ARP2HALTC 0x00000080
349#define EN_ARP2ILLOPC 0x00000040
350#define EN_ARP2PERR 0x00000020
351#define EN_ARP2CIOPERR 0x00000010
352#define EN_ARP2BREAK3 0x00000008
353#define EN_ARP2BREAK2 0x00000004
354#define EN_ARP2BREAK1 0x00000002
355#define EN_ARP2BREAK0 0x00000001
356
357#define ARP2BREAKADR01 0x0C
358
359#define BREAKADR1_MASK 0x0FFF0000
360#define BREAKADR0_MASK 0x00000FFF
361
362#define ARP2BREAKADR23 0x10
363
364#define BREAKADR3_MASK 0x0FFF0000
365#define BREAKADR2_MASK 0x00000FFF
366
367/* 0x14h - 0x1Ch are reserved */
368
369/*
370 * ARP2 Registers, Address Range : (0x00-0x1F)
371 * The definitions have the same address offset for CSEQ and LmSEQ
372 * CIO Bus Registers.
373 */
374#define MODEPTR 0x00
375
376#define DSTMODE 0xF0
377#define SRCMODE 0x0F
378
379#define ALTMODE 0x01
380
381#define ALTDMODE 0xF0
382#define ALTSMODE 0x0F
383
384#define ATOMICXCHG 0x02
385
386#define FLAG 0x04
387
388#define INTCODE_MASK 0xF0
389#define ALTMODEV2 0x04
390#define CARRY_INT 0x02
391#define CARRY 0x01
392
393#define ARP2INTCTL 0x05
394
395#define PAUSEDIS 0x80
396#define RSTINTCTL 0x40
397#define POPALTMODE 0x08
398#define ALTMODEV 0x04
399#define INTMASK 0x02
400#define IRET 0x01
401
402#define STACK 0x06
403
404#define FUNCTION1 0x07
405
406#define PRGMCNT 0x08
407
408#define ACCUM 0x0A
409
410#define SINDEX 0x0C
411
412#define DINDEX 0x0E
413
414#define ALLONES 0x10
415
416#define ALLZEROS 0x11
417
418#define SINDIR 0x12
419
420#define DINDIR 0x13
421
422#define JUMLDIR 0x14
423
424#define ARP2HALTCODE 0x15
425
426#define CURRADDR 0x16
427
428#define LASTADDR 0x18
429
430#define NXTLADDR 0x1A
431
432#define DBGPORTPTR 0x1C
433
434#define DBGPORT 0x1D
435
436/*
437 * CIO Registers.
438 * The definitions have the same address offset for CSEQ and LmSEQ
439 * CIO Bus Registers.
440 */
441#define MnSCBPTR 0x20
442
443#define MnDDBPTR 0x22
444
445#define SCRATCHPAGE 0x24
446
447#define MnSCRATCHPAGE 0x25
448
449#define SCRATCHPAGESV 0x26
450
451#define MnSCRATCHPAGESV 0x27
452
453#define MnDMAERRS 0x46
454
455#define MnSGDMAERRS 0x47
456
457#define MnSGBUF 0x53
458
459#define MnSGDMASTAT 0x5b
460
461#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */
462
463#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */
464
465#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */
466
467#define MnDMAENG 0x60
468
469#define MnPIPECTL 0x61
470
471#define MnSGBADR 0x65
472
473#define MnSCB_SITE 0x100
474
475#define MnDDB_SITE 0x180
476
477/*
478 * The common definitions below have the same address offset for both
479 * CSEQ and LmSEQ.
480 */
481#define BISTCTL0 0x4C
482
483#define BISTCTL1 0x50
484
485#define MAPPEDSCR 0x800
486
487/*
488 * CSEQ Host Register, Address Range : (0x000-0xFFC)
489 */
490#define CSEQ_HOST_REG_BASE_ADR 0xB8001000
491
492#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL)
493
494#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT)
495
496#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN)
497
498#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01)
499
500#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23)
501
502#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1)
503
504#define CSEQRAMBISTEN 0x00000040
505#define CSEQRAMBISTDN 0x00000020 /* ro */
506#define CSEQRAMBISTFAIL 0x00000010 /* ro */
507#define CSEQSCRBISTEN 0x00000004
508#define CSEQSCRBISTDN 0x00000002 /* ro */
509#define CSEQSCRBISTFAIL 0x00000001 /* ro */
510
511#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR)
512
513/*
514 * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC)
515 * 16 modes, each mode is 512 bytes.
516 * Unless specified otherwise, a register is valid for all modes.
517 */
518#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO
519
520#define CSEQm_CIO_REG(Mode, Reg) \
521 (CSEQ_CIO_REG_BASE_ADR + \
522 ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg))
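/* For example, CSEQm_CIO_REG(8, 0x30) resolves to
 * CSEQ_CIO_REG_BASE_ADR + 8 * 0x200 + 0x30 (cf. CSEQCON below).
 */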
523
524#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR)
525
526#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE)
527
528#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG)
529
530#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG)
531
532#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL)
533
534#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK)
535
536#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1)
537
538#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT)
539
540#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM)
541
542#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX)
543
544#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX)
545
546#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES)
547
548#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS)
549
550#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR)
551
552#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR)
553
554#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR)
555
556#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE)
557
558#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR)
559
560#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR)
561
562#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR)
563
564#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR)
565
566#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT)
567
568#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE)
569
570#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR)
571
572#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR)
573
574#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE)
575
576#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28)
577
578#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C)
579
580/* mode 0-7 */
581#define MnREQMBX 0x30
582#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30)
583
584/* mode 8 */
585#define CSEQCON CSEQm_CIO_REG(8, 0x30)
586
587/* mode 0-7 */
588#define MnRSPMBX 0x34
589#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34)
590
591/* mode 8 */
592#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34)
593
594/* mode 8 */
595#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35)
596
597/* mode 8 */
598#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36)
599
600/* mode 8 */
601#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37)
602
603#define CSHALTERR 0x10
604#define RESETCSDMA 0x08 /* wo */
605#define STARTCSDMA 0x04
606#define STOPCSDMA 0x02 /* wo */
607#define CSDMAACT 0x01 /* ro */
608
609/* mode 0-7 */
610#define MnINT 0x38
611#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38)
612
613#define CMnREQMBXE 0x02
614#define CMnRSPMBXF 0x01
615#define CMnINT_MASK 0x00000003
616
617/* mode 8 */
618#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38)
619
620/* mode 0-7 */
621#define MnINTEN 0x3C
622#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C)
623
624#define EN_CMnRSPMBXF 0x01
625
626/* mode 8 */
627#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C)
628
629/* mode 8 */
630#define CSDMAADR CSEQm_CIO_REG(8, 0x40)
631
632/* mode 8 */
633#define CSDMACNT CSEQm_CIO_REG(8, 0x48)
634
635/* mode 8 */
636#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D)
637
638#define DONELISTEND 0x10
639#define DONELISTSIZE_MASK 0x0F
640#define DONELISTSIZE_8ELEM 0x01
641#define DONELISTSIZE_16ELEM 0x02
642#define DONELISTSIZE_32ELEM 0x03
643#define DONELISTSIZE_64ELEM 0x04
644#define DONELISTSIZE_128ELEM 0x05
645#define DONELISTSIZE_256ELEM 0x06
646#define DONELISTSIZE_512ELEM 0x07
647#define DONELISTSIZE_1024ELEM 0x08
648#define DONELISTSIZE_2048ELEM 0x09
649#define DONELISTSIZE_4096ELEM 0x0A
650#define DONELISTSIZE_8192ELEM 0x0B
651#define DONELISTSIZE_16384ELEM 0x0C
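/* For nonzero encodings the element count is 4 << value:
 * 0x01 -> 8, 0x02 -> 16, ..., 0x0C -> 16384.
 */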
652
653/* mode 8 */
654#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E)
655
656/* mode 11 */
657#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50)
658
659/* mode 11 */
660#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52)
661
662/* mode 11 */
663#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54)
664
665#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60)
666
667#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61)
668
669#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62)
670
671#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64)
672
673#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68)
674
675/* mode 8, 32x32 bits, 128 bytes of mapped buffer */
676#define CSBUFFER CSEQm_CIO_REG(8, 0x80)
677
678#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0)
679
680/* mode 0-8 */
681#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0)
682
683/*
684 * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC)
685 */
686#define CSEQ_RAM_REG_BASE_ADR 0xB8004000
687
688/*
689 * The common definitions below have the same address offset for all the Link
690 * sequencers.
691 */
692#define MODECTL 0x40
693
694#define DBGMODE 0x44
695
696#define CONTROL 0x48
697#define LEDTIMER 0x00010000
698#define LEDTIMERS_10us 0x00000000
699#define LEDTIMERS_1ms 0x00000800
700#define LEDTIMERS_100ms 0x00001000
701#define LEDMODE_TXRX 0x00000000
702#define LEDMODE_CONNECTED 0x00000200
703#define LEDPOL 0x00000100
704
705#define LSEQRAM 0x1000
706
707/*
708 * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC)
709 */
710#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000
711#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000
712#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000
713#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000
714#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000
715#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000
716#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000
717#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000
718
719#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
720 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
721 ARP2CTL)
722
723#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
724 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
725 ARP2INT)
726
727#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
728 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
729 ARP2INTEN)
730
731#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
732 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
733 DBGMODE)
734
735#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
736 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
737 CONTROL)
738
739#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
740 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
741 ARP2BREAKADR01)
742
743#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
744 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
745 ARP2BREAKADR23)
746
747#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
748 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
749 MODECTL)
750
751#define LmAUTODISCI 0x08000000
752#define LmDSBLBITLT 0x04000000
753#define LmDSBLANTT 0x02000000
754#define LmDSBLCRTT 0x01000000
755#define LmDSBLCONT 0x00000100
756#define LmPRIMODE 0x00000080
757#define LmDSBLHOLD 0x00000040
758#define LmDISACK 0x00000020
759#define LmBLIND48 0x00000010
760#define LmRCVMODE_MASK 0x0000000C
761#define LmRCVMODE_PLD 0x00000000
762#define LmRCVMODE_HPC 0x00000004
763
764#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
765 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
766 DBGMODE)
767
768#define LmFRCPERR 0x80000000
769#define LmMEMSEL_MASK 0x30000000
770#define LmFRCRBPERR 0x00000000
771#define LmFRCTBPERR 0x10000000
772#define LmFRCSGBPERR 0x20000000
773#define LmFRCARBPERR 0x30000000
774#define LmRCVIDW 0x00080000
775#define LmINVDWERR 0x00040000
776#define LmRCVDISP 0x00004000
777#define LmDISPERR 0x00002000
778#define LmDSBLDSCR 0x00000800
779#define LmDSBLSCR 0x00000400
780#define LmFRCNAK 0x00000200
781#define LmFRCROFS 0x00000100
782#define LmFRCCRC 0x00000080
783#define LmFRMTYPE_MASK 0x00000070
784#define LmSG_DATA 0x00000000
785#define LmSG_COMMAND 0x00000010
786#define LmSG_TASK 0x00000020
787#define LmSG_TGTXFER 0x00000030
788#define LmSG_RESPONSE 0x00000040
789#define LmSG_IDENADDR 0x00000050
790#define LmSG_OPENADDR 0x00000060
791#define LmDISCRCGEN 0x00000008
792#define LmDISCRCCHK 0x00000004
793#define LmSSXMTFRM 0x00000002
794#define LmSSRCVFRM 0x00000001
795
796#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
797 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
798 CONTROL)
799
800#define LmSTEPXMTFRM 0x00000002
801#define LmSTEPRCVFRM 0x00000001
802
803#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
804 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
805 BISTCTL0)
806
807#define ARBBISTEN 0x40000000
808#define ARBBISTDN 0x20000000 /* ro */
809#define ARBBISTFAIL 0x10000000 /* ro */
810#define TBBISTEN 0x00000400
811#define TBBISTDN 0x00000200 /* ro */
812#define TBBISTFAIL 0x00000100 /* ro */
813#define RBBISTEN 0x00000040
814#define RBBISTDN 0x00000020 /* ro */
815#define RBBISTFAIL 0x00000010 /* ro */
816#define SGBISTEN 0x00000004
817#define SGBISTDN 0x00000002 /* ro */
818#define SGBISTFAIL 0x00000001 /* ro */
819
820#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
821 ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\
822 BISTCTL1)
823
824#define LmRAMPAGE1 0x00000200
825#define LmRAMPAGE0 0x00000100
826#define LmIMEMBISTEN 0x00000040
827#define LmIMEMBISTDN 0x00000020 /* ro */
828#define LmIMEMBISTFAIL 0x00000010 /* ro */
829#define LmSCRBISTEN 0x00000004
830#define LmSCRBISTDN 0x00000002 /* ro */
831#define LmSCRBISTFAIL 0x00000001 /* ro */
832#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0)
833#define LmRAMPAGE_LSHIFT 0x8
834
835#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
836 ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
837 MAPPEDSCR)
838
839#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
840 ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
841 LSEQRAM)
842
843/*
844 * LmSEQ CIO Bus Register, Address Range : (0x0000-0xFFC)
845 * 8 modes, each mode is 512 bytes.
846 * Unless specified otherwise, a register is valid for all modes.
847 */
848#define LmSEQ_CIOBUS_REG_BASE 0x2000
849
850#define LmSEQ_PHY_BASE(Mode, LinkNum) \
851 (LSEQ0_HOST_REG_BASE_ADR + \
852 (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \
853 LmSEQ_CIOBUS_REG_BASE + \
854 ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE))
855
856#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \
857 (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg))
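/* Worked example: LmSEQ_PHY_REG(0, 2, MODEPTR) addresses MODEPTR on
 * phy 2, i.e. 0xB8020000 + 2 * 0x4000 + 0x2000 + 0 * 0x200 + 0x00
 * = 0xB802A000 (cf. LmMODEPTR below).
 */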
858
859#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR)
860
861#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE)
862
863#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG)
864
865#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG)
866
867#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL)
868
869#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK)
870
871#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1)
872
873#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT)
874
875#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM)
876
877#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX)
878
879#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX)
880
881#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES)
882
883#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS)
884
885#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR)
886
887#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR)
888
889#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR)
890
891#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE)
892
893#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR)
894
895#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR)
896
897#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR)
898
899#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR)
900
901#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT)
902
903#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE)
904
905#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \
906 MnSCRATCHPAGE)
907
908#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28)
909
910#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30)
911
912#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34)
913
914#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38)
915
916#define CTXMEMSIZE 0x80000000 /* ro */
917#define LmACKREQ 0x08000000
918#define LmNAKREQ 0x04000000
919#define LmMnXMTERR 0x02000000
920#define LmM5OOBSVC 0x01000000
921#define LmHWTINT 0x00800000
922#define LmMnCTXDONE 0x00100000
923#define LmM2REQMBXF 0x00080000
924#define LmM2RSPMBXE 0x00040000
925#define LmMnDMAERR 0x00020000
926#define LmRCVPRIM 0x00010000
927#define LmRCVERR 0x00008000
928#define LmADDRRCV 0x00004000
929#define LmMnHDRMISS 0x00002000
930#define LmMnWAITSCB 0x00001000
931#define LmMnRLSSCB 0x00000800
932#define LmMnSAVECTX 0x00000400
933#define LmMnFETCHSG 0x00000200
934#define LmMnLOADCTX 0x00000100
935#define LmMnCFGICL 0x00000080
936#define LmMnCFGSATA 0x00000040
937#define LmMnCFGEXPSATA 0x00000020
938#define LmMnCFGCMPLT 0x00000010
939#define LmMnCFGRBUF 0x00000008
940#define LmMnSAVETTR 0x00000004
941#define LmMnCFGRDAT 0x00000002
942#define LmMnCFGHDR 0x00000001
943
944#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C)
945
946#define EN_LmACKREQ 0x08000000
947#define EN_LmNAKREQ 0x04000000
948#define EN_LmMnXMTERR 0x02000000
949#define EN_LmM5OOBSVC 0x01000000
950#define EN_LmHWTINT 0x00800000
951#define EN_LmMnCTXDONE 0x00100000
952#define EN_LmM2REQMBXF 0x00080000
953#define EN_LmM2RSPMBXE 0x00040000
954#define EN_LmMnDMAERR 0x00020000
955#define EN_LmRCVPRIM 0x00010000
956#define EN_LmRCVERR 0x00008000
957#define EN_LmADDRRCV 0x00004000
958#define EN_LmMnHDRMISS 0x00002000
959#define EN_LmMnWAITSCB 0x00001000
960#define EN_LmMnRLSSCB 0x00000800
961#define EN_LmMnSAVECTX 0x00000400
962#define EN_LmMnFETCHSG 0x00000200
963#define EN_LmMnLOADCTX 0x00000100
964#define EN_LmMnCFGICL 0x00000080
965#define EN_LmMnCFGSATA 0x00000040
966#define EN_LmMnCFGEXPSATA 0x00000020
967#define EN_LmMnCFGCMPLT 0x00000010
968#define EN_LmMnCFGRBUF 0x00000008
969#define EN_LmMnSAVETTR 0x00000004
970#define EN_LmMnCFGRDAT 0x00000002
971#define EN_LmMnCFGHDR 0x00000001
972
973#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \
974 EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \
975 EN_LmMnCFGHDR | EN_LmRCVERR | \
976 EN_LmADDRRCV | EN_LmMnHDRMISS | \
977 EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
978 EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
979 EN_LmHWTINT | EN_LmMnCTXDONE | \
980 EN_LmRCVPRIM | EN_LmMnCFGSATA | \
981 EN_LmMnCFGEXPSATA | EN_LmMnDMAERR)
982
983#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \
984 EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
985 EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
986 EN_LmMnXMTERR | EN_LmHWTINT | \
987 EN_LmMnCTXDONE | EN_LmRCVPRIM | \
988 EN_LmRCVERR | EN_LmMnDMAERR)
989
990#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \
991 EN_LmM2REQMBXF | EN_LmRCVPRIM | \
992 EN_LmRCVERR)
993
994#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \
995 EN_LmHWTINT | EN_LmRCVPRIM | \
996 EN_LmRCVERR)
997
998#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40)
999
1000#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44)
1001
1002#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45)
1003
1004#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46)
1005
1006#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47)
1007
1008#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48)
1009
1010#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48)
1011#define SAS_ALIGN_DEFAULT 0xFF
1012
1013#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49)
1014
1015#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49)
1016#define STP_ALIGN_DEFAULT 0x1F
1017
1018#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A)
1019
1020#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A)
1021
1022#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B)
1023
1024#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B)
1025
1026#define LmDISALIGN 0x20
1027#define LmROTSTPALIGN 0x10
1028#define LmSTPALIGN 0x08
1029#define LmROTNOTIFY 0x04
1030#define LmDUALALIGN 0x02
1031#define LmROTALIGN 0x01
1032
1033#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C)
1034
1035#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C)
1036
1037#define LmMnBUFSTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x4E)
1038
1039#define LmMnBUFPERR 0x01
1040
1041/* mode 0-1 */
1042#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59)
1043
1044#define LmMnXFRLVL_128 0x05
1045#define LmMnXFRLVL_256 0x04
1046#define LmMnXFRLVL_512 0x03
1047#define LmMnXFRLVL_1024 0x02
1048#define LmMnXFRLVL_1536 0x01
1049#define LmMnXFRLVL_2048 0x00
1050
1051 /* mode 0-1 */
1052#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A)
1053
1054#define LmMnRESETSG 0x04
1055#define LmMnSTOPSG 0x02
1056#define LmMnSTARTSG 0x01
1057
1058/* mode 0-1 */
1059#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B)
1060
1061/* mode 0-1 */
1062#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C)
1063
1064#define LmMnFLUSH 0x40 /* wo */
1065#define LmMnRLSRTRY 0x20 /* wo */
1066#define LmMnDISCARD 0x10 /* wo */
1067#define LmMnRESETDAT 0x08 /* wo */
1068#define LmMnSUSDAT 0x04 /* wo */
1069#define LmMnSTOPDAT 0x02 /* wo */
1070#define LmMnSTARTDAT 0x01 /* wo */
1071
1072/* mode 0-1 */
1073#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D)
1074
1075#define LmMnDPEMPTY 0x80
1076#define LmMnFLUSHING 0x40
1077#define LmMnDDMAREQ 0x20
1078#define LmMnHDMAREQ 0x10
1079#define LmMnDATFREE 0x08
1080#define LmMnDATSUS 0x04
1081#define LmMnDATACT 0x02
1082#define LmMnDATEN 0x01
1083
1084/* mode 0-1 */
1085#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E)
1086
1087#define LmMnDMATYPE_NORMAL 0x0000
1088#define LmMnDMATYPE_HOST_ONLY_TX 0x0001
1089#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002
1090#define LmMnDMATYPE_INVALID 0x0003
1091#define LmMnDMATYPE_MASK 0x0003
1092
1093#define LmMnDMAWRAP 0x0004
1094#define LmMnBITBUCKET 0x0008
1095#define LmMnDISHDR 0x0010
1096#define LmMnSTPCRC 0x0020
1097#define LmXTEST 0x0040
1098#define LmMnDISCRC 0x0080
1099#define LmMnENINTLK 0x0100
1100#define LmMnADDRFRM 0x0400
1101#define LmMnENXMTCRC 0x0800
1102
1103/* mode 0-1 */
1104#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70)
1105
1106/* mode 0-1 */
1107#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B)
1108#define LmMnDPSEL_MASK 0x07
1109#define LmMnEOLPRE 0x40
1110#define LmMnEOSPRE 0x80
1111
1112/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
1113/* Receive Mode n = 0 */
1114#define LmMnHRADDR 0x00
1115#define LmMnHBYTECNT 0x01
1116#define LmMnHREWIND 0x02
1117#define LmMnDWADDR 0x03
1118#define LmMnDSPACECNT 0x04
1119#define LmMnDFRMSIZE 0x05
1120
1121/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
1122/* Transmit Mode n = 1 */
1123#define LmMnHWADDR 0x00
1124#define LmMnHSPACECNT 0x01
1125/* #define LmMnHREWIND 0x02 */
1126#define LmMnDRADDR 0x03
1127#define LmMnDBYTECNT 0x04
1128/* #define LmMnDFRMSIZE 0x05 */
1129
1130/* mode 0-1 */
1131#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78)
1132#define LmMnDPACC_MASK 0x00FFFFFF
1133
1134/* mode 0-1 */
1135#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D)
1136
1137#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80)
1138#define LmPRMSTAT0BYTE0 0x80
1139#define LmPRMSTAT0BYTE1 0x81
1140#define LmPRMSTAT0BYTE2 0x82
1141#define LmPRMSTAT0BYTE3 0x83
1142
1143#define LmFRAMERCVD 0x80000000
1144#define LmXFRRDYRCVD 0x40000000
1145#define LmUNKNOWNP 0x20000000
1146#define LmBREAK 0x10000000
1147#define LmDONE 0x08000000
1148#define LmOPENACPT 0x04000000
1149#define LmOPENRJCT 0x02000000
1150#define LmOPENRTRY 0x01000000
1151#define LmCLOSERV1 0x00800000
1152#define LmCLOSERV0 0x00400000
1153#define LmCLOSENORM 0x00200000
1154#define LmCLOSECLAF 0x00100000
1155#define LmNOTIFYRV2 0x00080000
1156#define LmNOTIFYRV1 0x00040000
1157#define LmNOTIFYRV0 0x00020000
1158#define LmNOTIFYSPIN 0x00010000
1159#define LmBROADRV4 0x00008000
1160#define LmBROADRV3 0x00004000
1161#define LmBROADRV2 0x00002000
1162#define LmBROADRV1 0x00001000
1163#define LmBROADSES 0x00000800
1164#define LmBROADRVCH1 0x00000400
1165#define LmBROADRVCH0 0x00000200
1166#define LmBROADCH 0x00000100
1167#define LmAIPRVWP 0x00000080
1168#define LmAIPWP 0x00000040
1169#define LmAIPWD 0x00000020
1170#define LmAIPWC 0x00000010
1171#define LmAIPRV2 0x00000008
1172#define LmAIPRV1 0x00000004
1173#define LmAIPRV0 0x00000002
1174#define LmAIPNRML 0x00000001
1175
1176#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \
1177 LmBROADRVCH1)
1178
1179#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84)
1180#define LmPRMSTAT1BYTE0 0x84
1181#define LmPRMSTAT1BYTE1 0x85
1182#define LmPRMSTAT1BYTE2 0x86
1183#define LmPRMSTAT1BYTE3 0x87
1184
1185#define LmFRMRCVDSTAT 0x80000000
1186#define LmBREAK_DET 0x04000000
1187#define LmCLOSE_DET 0x02000000
1188#define LmDONE_DET 0x01000000
1189#define LmXRDY 0x00040000
1190#define LmSYNCSRST 0x00020000
1191#define LmSYNC 0x00010000
1192#define LmXHOLD 0x00008000
1193#define LmRRDY 0x00004000
1194#define LmHOLD 0x00002000
1195#define LmROK 0x00001000
1196#define LmRIP 0x00000800
1197#define LmCRBLK 0x00000400
1198#define LmACK 0x00000200
1199#define LmNAK 0x00000100
1200#define LmHARDRST 0x00000080
1201#define LmERROR 0x00000040
1202#define LmRERR 0x00000020
1203#define LmPMREQP 0x00000010
1204#define LmPMREQS 0x00000008
1205#define LmPMACK 0x00000004
1206#define LmPMNAK 0x00000002
1207#define LmDMAT 0x00000001
1208
1209/* mode 1 */
1210#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E)
1211#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93)
1212
1213/* mode 0 */
1214#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0)
1215
1216#define LmACRCERR 0x00000800
1217#define LmPHYOVRN 0x00000400
1218#define LmOBOVRN 0x00000200
1219#define LmMnZERODATA 0x00000100
1220#define LmSATAINTLK 0x00000080
1221#define LmMnCRCERR 0x00000020
1222#define LmRRDYOVRN 0x00000010
1223#define LmMISSSOAF 0x00000008
1224#define LmMISSSOF 0x00000004
1225#define LmMISSEOAF 0x00000002
1226#define LmMISSEOF 0x00000001
1227
1228#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4)
1229
1230#define EN_LmACRCERR 0x00000800
1231#define EN_LmPHYOVRN 0x00000400
1232#define EN_LmOBOVRN 0x00000200
1233#define EN_LmMnZERODATA 0x00000100
1234#define EN_LmSATAINTLK 0x00000080
1235#define EN_LmFRMBAD 0x00000040
1236#define EN_LmMnCRCERR 0x00000020
1237#define EN_LmRRDYOVRN 0x00000010
1238#define EN_LmMISSSOAF 0x00000008
1239#define EN_LmMISSSOF 0x00000004
1240#define EN_LmMISSEOAF 0x00000002
1241#define EN_LmMISSEOF 0x00000001
1242
1243#define LmFRMERREN_MASK (EN_LmSATAINTLK | EN_LmMnCRCERR | \
1244 EN_LmRRDYOVRN | EN_LmMISSSOF | \
1245 EN_LmMISSEOAF | EN_LmMISSEOF | \
1246 EN_LmACRCERR | EN_LmPHYOVRN | \
1247 EN_LmOBOVRN | EN_LmMnZERODATA)
1248
1249#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5)
1250
1251#define EN_LmDONETO 0x80
1252#define EN_LmINVDISP 0x40
1253#define EN_LmINVDW 0x20
1254#define EN_LmDWSEVENT 0x08
1255#define EN_LmCRTTTO 0x04
1256#define EN_LmANTTTO 0x02
1257#define EN_LmBITLTTO 0x01
1258
1259#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \
1260 EN_LmDWSEVENT | EN_LmCRTTTO | \
1261 EN_LmANTTTO | EN_LmDONETO | \
1262 EN_LmBITLTTO)
1263
1264#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7)
1265
1266#define LmDONETO 0x80
1267#define LmINVDISP 0x40
1268#define LmINVDW 0x20
1269#define LmDWSEVENT 0x08
1270#define LmCRTTTO 0x04
1271#define LmANTTTO 0x02
1272#define LmBITLTTO 0x01
1273
1274#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8)
1275#define LmDATABUFADR_MASK 0x0FFF
1276
1277#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA)
1278
1279#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0)
1280
1281#define EN_LmUNKNOWNP 0x20000000
1282#define EN_LmBREAK 0x10000000
1283#define EN_LmDONE 0x08000000
1284#define EN_LmOPENACPT 0x04000000
1285#define EN_LmOPENRJCT 0x02000000
1286#define EN_LmOPENRTRY 0x01000000
1287#define EN_LmCLOSERV1 0x00800000
1288#define EN_LmCLOSERV0 0x00400000
1289#define EN_LmCLOSENORM 0x00200000
1290#define EN_LmCLOSECLAF 0x00100000
1291#define EN_LmNOTIFYRV2 0x00080000
1292#define EN_LmNOTIFYRV1 0x00040000
1293#define EN_LmNOTIFYRV0 0x00020000
1294#define EN_LmNOTIFYSPIN 0x00010000
1295#define EN_LmBROADRV4 0x00008000
1296#define EN_LmBROADRV3 0x00004000
1297#define EN_LmBROADRV2 0x00002000
1298#define EN_LmBROADRV1 0x00001000
1299#define EN_LmBROADRV0 0x00000800
1300#define EN_LmBROADRVCH1 0x00000400
1301#define EN_LmBROADRVCH0 0x00000200
1302#define EN_LmBROADCH 0x00000100
1303#define EN_LmAIPRVWP 0x00000080
1304#define EN_LmAIPWP 0x00000040
1305#define EN_LmAIPWD 0x00000020
1306#define EN_LmAIPWC 0x00000010
1307#define EN_LmAIPRV2 0x00000008
1308#define EN_LmAIPRV1 0x00000004
1309#define EN_LmAIPRV0 0x00000002
1310#define EN_LmAIPNRML 0x00000001
1311
1312#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \
1313 EN_LmDONE | EN_LmOPENACPT | \
1314 EN_LmOPENRJCT | EN_LmOPENRTRY | \
1315 EN_LmCLOSERV1 | EN_LmCLOSERV0 | \
1316 EN_LmCLOSENORM | EN_LmCLOSECLAF | \
1317 EN_LmBROADRV4 | EN_LmBROADRV3 | \
1318 EN_LmBROADRV2 | EN_LmBROADRV1 | \
1319 EN_LmBROADRV0 | EN_LmBROADRVCH1 | \
1320 EN_LmBROADRVCH0 | EN_LmBROADCH | \
1321 EN_LmAIPRVWP | EN_LmAIPWP | \
1322 EN_LmAIPWD | EN_LmAIPWC | \
1323 EN_LmAIPRV2 | EN_LmAIPRV1 | \
1324 EN_LmAIPRV0 | EN_LmAIPNRML)
1325
1326#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4)
1327
1328#define EN_LmXRDY 0x00040000
1329#define EN_LmSYNCSRST 0x00020000
1330#define EN_LmSYNC 0x00010000
1331#define EN_LmXHOLD 0x00008000
1332#define EN_LmRRDY 0x00004000
1333#define EN_LmHOLD 0x00002000
1334#define EN_LmROK 0x00001000
1335#define EN_LmRIP 0x00000800
1336#define EN_LmCRBLK 0x00000400
1337#define EN_LmACK 0x00000200
1338#define EN_LmNAK 0x00000100
1339#define EN_LmHARDRST 0x00000080
1340#define EN_LmERROR 0x00000040
1341#define EN_LmRERR 0x00000020
1342#define EN_LmPMREQP 0x00000010
1343#define EN_LmPMREQS 0x00000008
1344#define EN_LmPMACK 0x00000004
1345#define EN_LmPMNAK 0x00000002
1346#define EN_LmDMAT 0x00000001
1347
1348#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \
1349 EN_LmSYNCSRST | \
1350 EN_LmPMREQP | EN_LmPMREQS | \
1351 EN_LmPMACK | EN_LmPMNAK)
1352
1353#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8)
1354
1355#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC)
1356
1357#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0)
1358
1359
1360/*
1361 * LmSEQ CIO Bus Mode 3 Register.
1362 * Mode 3: Configuration and Setup, IOP Context SCB.
1363 */
1364#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48)
1365
1366#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90)
1367
1368#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92)
1369
1370#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94)
1371
1372#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96)
1373
1374#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98)
1375
1376#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A)
1377
1378#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C)
1379
1380#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E)
1381
1382#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4)
1383
1384#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6)
1385
1386#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0)
1387
1388#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4)
1389
1390#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2)
1391
1392#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8)
1393
1394
1395/*
1396 * LmSEQ CIO Bus Mode 5 Registers.
1397 * Mode 5: Phy/OOB Control and Status.
1398 */
1399#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg))
1400
1401#define OOB_BFLTR 0x100
1402
1403#define BFLTR_THR_MASK 0xF0
1404#define BFLTR_TC_MASK 0x0F
1405
1406#define OOB_INIT_MIN 0x102
1407
1408#define OOB_INIT_MAX 0x104
1409
1410#define OOB_INIT_NEG 0x106
1411
1412#define OOB_SAS_MIN 0x108
1413
1414#define OOB_SAS_MAX 0x10A
1415
1416#define OOB_SAS_NEG 0x10C
1417
1418#define OOB_WAKE_MIN 0x10E
1419
1420#define OOB_WAKE_MAX 0x110
1421
1422#define OOB_WAKE_NEG 0x112
1423
1424#define OOB_IDLE_MAX 0x114
1425
1426#define OOB_BURST_MAX 0x116
1427
1428#define OOB_DATA_KBITS 0x126
1429
1430#define OOB_ALIGN_0_DATA 0x12C
1431
1432#define OOB_ALIGN_1_DATA 0x130
1433
1434#define D10_2_DATA_k 0x00
1435#define SYNC_DATA_k 0x02
1436#define ALIGN_1_DATA_k 0x04
1437#define ALIGN_0_DATA_k 0x08
1438#define BURST_DATA_k 0x10
1439
1440#define OOB_PHY_RESET_COUNT 0x13C
1441
1442#define OOB_SIG_GEN 0x140
1443
1444#define START_OOB 0x80
1445#define START_DWS 0x40
1446#define ALIGN_CNT3 0x30
1447#define ALIGN_CNT2 0x20
1448#define ALIGN_CNT1 0x10
1449#define ALIGN_CNT4 0x00
1450#define STOP_DWS 0x08
1451#define SEND_COMSAS 0x04
1452#define SEND_COMINIT 0x02
1453#define SEND_COMWAKE 0x01
1454
1455#define OOB_XMIT 0x141
1456
1457#define TX_ENABLE 0x80
1458#define XMIT_OOB_BURST 0x10
1459#define XMIT_D10_2 0x08
1460#define XMIT_SYNC 0x04
1461#define XMIT_ALIGN_1 0x02
1462#define XMIT_ALIGN_0 0x01
1463
1464#define FUNCTION_MASK 0x142
1465
1466#define SAS_MODE_DIS 0x80
1467#define SATA_MODE_DIS 0x40
1468#define SPINUP_HOLD_DIS 0x20
1469#define HOT_PLUG_DIS 0x10
1470#define SATA_PS_DIS 0x08
1471#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS)
1472
1473#define OOB_MODE 0x143
1474
1475#define SAS_MODE 0x80
1476#define SATA_MODE 0x40
1477#define SLOW_CLK 0x20
1478#define FORCE_XMIT_15 0x08
1479#define PHY_SPEED_60 0x04
1480#define PHY_SPEED_30 0x02
1481#define PHY_SPEED_15 0x01
1482
1483#define CURRENT_STATUS 0x144
1484
1485#define CURRENT_OOB_DONE 0x80
1486#define CURRENT_LOSS_OF_SIGNAL 0x40
1487#define CURRENT_SPINUP_HOLD 0x20
1488#define CURRENT_HOT_PLUG_CNCT 0x10
1489#define CURRENT_GTO_TIMEOUT 0x08
1490#define CURRENT_OOB_TIMEOUT 0x04
1491#define CURRENT_DEVICE_PRESENT 0x02
1492#define CURRENT_OOB_ERROR 0x01
1493
1494#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \
1495 CURRENT_GTO_TIMEOUT)
1496
1497#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \
1498 CURRENT_OOB_ERROR)
1499
1500#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \
1501 CURRENT_HOT_PLUG_CNCT | \
1502 CURRENT_DEVICE_PRESENT)
1503
1504#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \
1505 CURRENT_DEVICE_PRESENT)
1506
1507#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL
1508
1509#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \
1510 CURRENT_LOSS_OF_SIGNAL | \
1511 CURRENT_SPINUP_HOLD | \
1512 CURRENT_HOT_PLUG_CNCT | \
1513 CURRENT_GTO_TIMEOUT | \
1514 CURRENT_DEVICE_PRESENT | \
1515 CURRENT_OOB_ERROR )
1516
1517#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \
1518 CURRENT_GTO_TIMEOUT | \
1519 CURRENT_OOB_TIMEOUT | \
1520 CURRENT_OOB_ERROR )
1521
1522#define SPEED_MASK 0x145
1523
1524#define SATA_SPEED_30_DIS 0x10
1525#define SATA_SPEED_15_DIS 0x08
1526#define SAS_SPEED_60_DIS 0x04
1527#define SAS_SPEED_30_DIS 0x02
1528#define SAS_SPEED_15_DIS 0x01
1529#define SAS_SPEED_MASK_DEFAULT 0x00
1530
1531#define OOB_TIMER_ENABLE 0x14D
1532
1533#define HOT_PLUG_EN 0x80
1534#define RCD_EN 0x40
1535#define COMTIMER_EN 0x20
1536#define SNTT_EN 0x10
1537#define SNLT_EN 0x04
1538#define SNWT_EN 0x02
1539#define ALIGN_EN 0x01
1540
1541#define OOB_STATUS 0x14E
1542
1543#define OOB_DONE 0x80
1544#define LOSS_OF_SIGNAL 0x40 /* ro */
1545#define SPINUP_HOLD 0x20
1546#define HOT_PLUG_CNCT 0x10 /* ro */
1547#define GTO_TIMEOUT 0x08 /* ro */
1548#define OOB_TIMEOUT 0x04 /* ro */
1549#define DEVICE_PRESENT 0x02 /* ro */
1550#define OOB_ERROR 0x01 /* ro */
1551
1552#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \
1553 OOB_TIMEOUT | OOB_ERROR)
1554
1555#define OOB_STATUS_CLEAR 0x14F
1556
1557#define OOB_DONE_CLR 0x80
1558#define LOSS_OF_SIGNAL_CLR 0x40
1559#define SPINUP_HOLD_CLR 0x20
1560#define HOT_PLUG_CNCT_CLR 0x10
1561#define GTO_TIMEOUT_CLR 0x08
1562#define OOB_TIMEOUT_CLR 0x04
1563#define OOB_ERROR_CLR 0x01
1564
1565#define HOT_PLUG_DELAY 0x150
1566/* In 5 ms units. 20 = 100 ms. */
1567#define HOTPLUG_DELAY_TIMEOUT 20
1568
1569
1570#define INT_ENABLE_2 0x15A
1571
1572#define OOB_DONE_EN 0x80
1573#define LOSS_OF_SIGNAL_EN 0x40
1574#define SPINUP_HOLD_EN 0x20
1575#define HOT_PLUG_CNCT_EN 0x10
1576#define GTO_TIMEOUT_EN 0x08
1577#define OOB_TIMEOUT_EN 0x04
1578#define DEVICE_PRESENT_EN 0x02
1579#define OOB_ERROR_EN 0x01
1580
1581#define PHY_CONTROL_0 0x160
1582
1583#define PHY_LOWPWREN_TX 0x80
1584#define PHY_LOWPWREN_RX 0x40
1585#define SPARE_REG_160_B5 0x20
1586#define OFFSET_CANCEL_RX 0x10
1587
1588/* bits 3:2 */
1589#define PHY_RXCOMCENTER_60V 0x00
1590#define PHY_RXCOMCENTER_70V 0x04
1591#define PHY_RXCOMCENTER_80V 0x08
1592#define PHY_RXCOMCENTER_90V 0x0C
1593#define PHY_RXCOMCENTER_MASK 0x0C
1594
1595#define PHY_RESET 0x02
1596#define SAS_DEFAULT_SEL 0x01
1597
1598#define PHY_CONTROL_1 0x161
1599
1600/* bits 2:0 */
1601#define SATA_PHY_DETLEVEL_50mv 0x00
1602#define SATA_PHY_DETLEVEL_75mv 0x01
1603#define SATA_PHY_DETLEVEL_100mv 0x02
1604#define SATA_PHY_DETLEVEL_125mv 0x03
1605#define SATA_PHY_DETLEVEL_150mv 0x04
1606#define SATA_PHY_DETLEVEL_175mv 0x05
1607#define SATA_PHY_DETLEVEL_200mv 0x06
1608#define SATA_PHY_DETLEVEL_225mv 0x07
1609#define SATA_PHY_DETLEVEL_MASK 0x07
1610
1611/* bits 5:3 */
1612#define SAS_PHY_DETLEVEL_50mv 0x00
1613#define SAS_PHY_DETLEVEL_75mv 0x08
1614#define SAS_PHY_DETLEVEL_100mv 0x10
1615#define SAS_PHY_DETLEVEL_125mv 0x18
1616#define SAS_PHY_DETLEVEL_150mv 0x20
1617#define SAS_PHY_DETLEVEL_175mv 0x28
1618#define SAS_PHY_DETLEVEL_200mv 0x30
1619#define SAS_PHY_DETLEVEL_225mv 0x38
1620#define SAS_PHY_DETLEVEL_MASK 0x38
1621
1622#define PHY_CONTROL_2 0x162
1623
1624/* bits 7:5 */
1625#define SATA_PHY_DRV_400mv 0x00
1626#define SATA_PHY_DRV_450mv 0x20
1627#define SATA_PHY_DRV_500mv 0x40
1628#define SATA_PHY_DRV_550mv 0x60
1629#define SATA_PHY_DRV_600mv 0x80
1630#define SATA_PHY_DRV_650mv 0xA0
1631#define SATA_PHY_DRV_725mv 0xC0
1632#define SATA_PHY_DRV_800mv 0xE0
1633#define SATA_PHY_DRV_MASK 0xE0
1634
1635/* bits 4:3 */
1636#define SATA_PREEMP_0 0x00
1637#define SATA_PREEMP_1 0x08
1638#define SATA_PREEMP_2 0x10
1639#define SATA_PREEMP_3 0x18
1640#define SATA_PREEMP_MASK 0x18
1641
1642#define SATA_CMSH1P5 0x04
1643
1644/* bits 1:0 */
1645#define SATA_SLEW_0 0x00
1646#define SATA_SLEW_1 0x01
1647#define SATA_SLEW_2 0x02
1648#define SATA_SLEW_3 0x03
1649#define SATA_SLEW_MASK 0x03
1650
1651#define PHY_CONTROL_3 0x163
1652
1653/* bits 7:5 */
1654#define SAS_PHY_DRV_400mv 0x00
1655#define SAS_PHY_DRV_450mv 0x20
1656#define SAS_PHY_DRV_500mv 0x40
1657#define SAS_PHY_DRV_550mv 0x60
1658#define SAS_PHY_DRV_600mv 0x80
1659#define SAS_PHY_DRV_650mv 0xA0
1660#define SAS_PHY_DRV_725mv 0xC0
1661#define SAS_PHY_DRV_800mv 0xE0
1662#define SAS_PHY_DRV_MASK 0xE0
1663
1664/* bits 4:3 */
1665#define SAS_PREEMP_0 0x00
1666#define SAS_PREEMP_1 0x08
1667#define SAS_PREEMP_2 0x10
1668#define SAS_PREEMP_3 0x18
1669#define SAS_PREEMP_MASK 0x18
1670
1671#define SAS_CMSH1P5 0x04
1672
1673/* bits 1:0 */
1674#define SAS_SLEW_0 0x00
1675#define SAS_SLEW_1 0x01
1676#define SAS_SLEW_2 0x02
1677#define SAS_SLEW_3 0x03
1678#define SAS_SLEW_MASK 0x03
1679
1680#define PHY_CONTROL_4 0x168
1681
1682#define PHY_DONE_CAL_TX 0x80
1683#define PHY_DONE_CAL_RX 0x40
1684#define RX_TERM_LOAD_DIS 0x20
1685#define TX_TERM_LOAD_DIS 0x10
1686#define AUTO_TERM_CAL_DIS 0x08
1687#define PHY_SIGDET_FLTR_EN 0x04
1688#define OSC_FREQ 0x02
1689#define PHY_START_CAL 0x01
1690
1691/*
1692 * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
1693 */
1694#define PCIX_REG_BASE_ADR 0xB8040000
1695
1696#define PCIC_VENDOR_ID 0x00
1697
1698#define PCIC_DEVICE_ID 0x02
1699
1700#define PCIC_COMMAND 0x04
1701
1702#define INT_DIS 0x0400
1703#define FBB_EN 0x0200 /* ro */
1704#define SERR_EN 0x0100
1705#define STEP_EN 0x0080 /* ro */
1706#define PERR_EN 0x0040
1707#define VGA_EN 0x0020 /* ro */
1708#define MWI_EN 0x0010
1709#define SPC_EN 0x0008
1710#define MST_EN 0x0004
1711#define MEM_EN 0x0002
1712#define IO_EN 0x0001
1713
1714#define PCIC_STATUS 0x06
1715
1716#define PERR_DET 0x8000
1717#define SERR_GEN 0x4000
1718#define MABT_DET 0x2000
1719#define TABT_DET 0x1000
1720#define TABT_GEN 0x0800
1721#define DPERR_DET 0x0100
1722#define CAP_LIST 0x0010
1723#define INT_STAT 0x0008
1724
1725#define PCIC_DEVREV_ID 0x08
1726
1727#define PCIC_CLASS_CODE 0x09
1728
1729#define PCIC_CACHELINE_SIZE 0x0C
1730
1731#define PCIC_MBAR0 0x10
1732
1733#define PCIC_MBAR0_OFFSET 0
1734
1735#define PCIC_MBAR1 0x18
1736
1737#define PCIC_MBAR1_OFFSET 2
1738
1739#define PCIC_IOBAR 0x20
1740
1741#define PCIC_IOBAR_OFFSET 4
1742
1743#define PCIC_SUBVENDOR_ID 0x2C
1744
1745#define PCIC_SUBSYTEM_ID 0x2E
1746
1747#define PCIX_STATUS 0x44
1748#define RCV_SCE 0x20000000
1749#define UNEXP_SC 0x00080000
1750#define SC_DISCARD 0x00040000
1751
1752#define ECC_CTRL_STAT 0x48
1753#define UNCOR_ECCERR 0x00000008
1754
1755#define PCIC_PM_CSR 0x5C
1756
1757#define PWR_STATE_D0 0
1758#define PWR_STATE_D1 1 /* not supported */
1759#define PWR_STATE_D2 2 /* not supported */
1760#define PWR_STATE_D3 3
1761
1762#define PCIC_BASE1 0x6C /* internal use only */
1763
1764#define BASE1_RSVD 0xFFFFFFF8
1765
1766#define PCIC_BASEA 0x70 /* internal use only */
1767
1768#define BASEA_RSVD 0xFFFFFFC0
1769#define BASEA_START 0
1770
1771#define PCIC_BASEB 0x74 /* internal use only */
1772
1773#define BASEB_RSVD 0xFFFFFF80
1774#define BASEB_IOMAP_MASK 0x7F
1775#define BASEB_START 0x80
1776
1777#define PCIC_BASEC 0x78 /* internal use only */
1778
1779#define BASEC_RSVD 0xFFFFFFFC
1780#define BASEC_MASK 0x03
1781#define BASEC_START 0x58
1782
1783#define PCIC_MBAR_KEY 0x7C /* internal use only */
1784
1785#define MBAR_KEY_MASK 0xFFFFFFFF
1786
1787#define PCIC_HSTPCIX_CNTRL 0xA0
1788
1789#define REWIND_DIS 0x0800
1790#define SC_TMR_DIS 0x04000000
1791
1792#define PCIC_MBAR0_MASK 0xA8
1793#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000
1794#define PCIC_MBAR0_SIZE_SHIFT 13
1795#define PCIC_MBAR0_SIZE(val) \
1796 (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT)
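/* E.g. a raw register value of 0x00020000 yields
 * PCIC_MBAR0_SIZE(0x00020000) == 0x10 (i.e. 0x00020000 >> 13).
 */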
1797
1798#define PCIC_FLASH_MBAR 0xB8
1799
1800#define PCIC_INTRPT_STAT 0xD4
1801
1802#define PCIC_TP_CTRL 0xFC
1803
1804/*
1805 * EXSI Registers, Address Range: (0x00-0xFC)
1806 */
1807#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI
1808
1809#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00)
1810
1811#define OCMINITIALIZED 0x80000000
1812#define ASIEN 0x00400000
1813#define HCMODE 0x00200000
1814#define PCIDEF 0x00100000
1815#define COMSTOCK 0x00080000
1816#define SEEPROMEND 0x00040000
1817#define MSTTIMEN 0x00020000
1818#define XREGEX 0x00000200
1819#define NVRAMW 0x00000100
1820#define NVRAMEX 0x00000080
1821#define SRAMW 0x00000040
1822#define SRAMEX 0x00000020
1823#define FLASHW 0x00000010
1824#define FLASHEX 0x00000008
1825#define SEEPROMCFG 0x00000004
1826#define SEEPROMTYP 0x00000002
1827#define SEEPROMEX 0x00000001
1828
1829
1830#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04)
1831
1832#define MODINT_EN 0x00000001
1833
1834
1835#define PMSTATR (EXSI_REG_BASE_ADR + 0x10)
1836
1837#define FLASHRST 0x00000002
1838#define FLASHRDY 0x00000001
1839
1840
1841#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14)
1842
1843#define FLWEH_MASK 0x30000000
1844#define FLWESU_MASK 0x0C000000
1845#define FLWEPW_MASK 0x03F00000
1846#define FLOEH_MASK 0x000C0000
1847#define FLOESU_MASK 0x00030000
1848#define FLOEPW_MASK 0x0000FC00
1849#define FLCSH_MASK 0x00000300
1850#define FLCSSU_MASK 0x000000C0
1851#define FLCSPW_MASK 0x0000003F
1852
1853#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18)
1854
1855#define SRWEH_MASK 0x30000000
1856#define SRWESU_MASK 0x0C000000
1857#define SRWEPW_MASK 0x03F00000
1858
1859#define SROEH_MASK 0x000C0000
1860#define SROESU_MASK 0x00030000
1861#define SROEPW_MASK 0x0000FC00
1862#define SRCSH_MASK 0x00000300
1863#define SRCSSU_MASK 0x000000C0
1864#define SRCSPW_MASK 0x0000003F
1865
1866#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C)
1867
1868#define NVWEH_MASK 0x30000000
1869#define NVWESU_MASK 0x0C000000
1870#define NVWEPW_MASK 0x03F00000
1871#define NVOEH_MASK 0x000C0000
1872#define NVOESU_MASK 0x00030000
1873#define NVOEPW_MASK 0x0000FC00
1874#define NVCSH_MASK 0x00000300
1875#define NVCSSU_MASK 0x000000C0
1876#define NVCSPW_MASK 0x0000003F
1877
1878#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20)
1879
1880#define XRWEH_MASK 0x30000000
1881#define XRWESU_MASK 0x0C000000
1882#define XRWEPW_MASK 0x03F00000
1883#define XROEH_MASK 0x000C0000
1884#define XROESU_MASK 0x00030000
1885#define XROEPW_MASK 0x0000FC00
1886#define XRCSH_MASK 0x00000300
1887#define XRCSSU_MASK 0x000000C0
1888#define XRCSPW_MASK 0x0000003F
1889
1890#define XREGADDR (EXSI_REG_BASE_ADR + 0x24)
1891
1892#define XRADDRINCEN 0x80000000
1893#define XREGADD_MASK 0x007FFFFF
1894
1895
1896#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28)
1897
1898#define XREGDATA_MASK 0x0000FFFF
1899
1900#define GPIOOER (EXSI_REG_BASE_ADR + 0x40)
1901
1902#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44)
1903
1904#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48)
1905
1906#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C)
1907
1908#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50)
1909
1910#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54)
1911
1912#define GPIO_EXTSRC 0x00000001
1913
1914#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0)
1915
1916#define SXFERDONE 0x00000100
1917#define SXFERCNT_MASK 0x000000E0
1918#define SCMDTYP_MASK 0x0000001C
1919#define SXFERSTART 0x00000002
1920#define SXFEREN 0x00000001
1921
1922#define SRATER (EXSI_REG_BASE_ADR + 0xA4)
1923
1924#define SADDRR (EXSI_REG_BASE_ADR + 0xA8)
1925
1926#define SADDR_MASK 0x0000FFFF
1927
1928#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC)
1929
1930#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC)
1931#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD)
1932#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE)
1933#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF)
1934
1935#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0)
1936
1937#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0)
1938#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1)
1939#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2)
1940#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3)
1941
1942#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0)
1943#define ASIFMTERR 0x00000400
1944#define ASISEECHKERR 0x00000200
1945#define ASIERR 0x00000100
1946
1947#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4)
1948#define CHECKSUM_MASK 0x0000FFFF
1949
1950#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8)
1951#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC)
1952#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0)
1953#define CPI2ASIBYTECNT_MASK 0x00070000
1954#define CPI2ASIBYTEEN_MASK 0x0000F000
1955#define CPI2ASITARGERR_MASK 0x00000F00
1956#define CPI2ASITARGMID_MASK 0x000000F0
1957#define CPI2ASIMSTERR_MASK 0x0000000F
1958
1959/*
1960 * XSRAM, External SRAM (DWord and any BE pattern accessible)
1961 */
1962#define XSRAM_REG_BASE_ADDR 0xB8100000
1963#define XSRAM_SIZE 0x100000
1964
1965/*
1966 * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF).
1967 */
1968#define NVRAM_REG_BASE_ADR 0xBF800000
1969#define NVRAM_MAX_BASE_ADR 0x003FFFFF
1970
1971/* OCM base address */
1972#define OCM_BASE_ADDR 0xA0000000
1973#define OCM_MAX_SIZE 0x20000
1974
1975/*
1976 * Sequencers (Central and Link) Scratch RAM page definitions.
1977 */
1978
1979/*
1980 * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024
1981 * byte memory. It is dword accessible and has byte parity
1982 * protection. The CSEQ accesses it in 32 byte windows (pages),
1983 * either as mode dependent or mode independent memory.
1984 * Each of modes 0-7 has 96 bytes of mode dependent scratch (three
1985 * 32 byte pages, 0-2, not contiguous). Mode 8 has 128 bytes of mode
1986 * dependent scratch (pages 0-3), which overlap mode independent
1987 * scratch pages 0-3. In total:
1988 * - 896 bytes of mode dependent scratch: 96 bytes per mode for
1989 * modes 0-7, plus 128 bytes in mode 8,
1990 * - 256 bytes of mode independent scratch (pages 0-7), common to
1991 * modes 0-15, of which only pages 4-7 (128 bytes) lie outside
1992 * mode 8 dependent scratch.
1993 *
1994 * Counting the overlapping region only once, the 768 bytes of modes
1995 * 0-7, the 128 bytes of mode 8 (doubling as mode independent pages
1996 * 0-3) and the 128 bytes of exclusive mode independent scratch
1997 * (pages 4-7) account for the full 1024 bytes, subdivided
1998 * throughout into 32 byte pages.
1999 *
2000 * The host accesses this scratch in a different manner from the
2001 * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE
2002 * and CMnSCRPAGE to access the scratch memory. A flat mapping of the
2003 * scratch memory is available for software convenience and to prevent
2004 * corruption while the sequencer is running. This memory is mapped
2005 * onto addresses 800h - BFFh, total of 400h bytes.
2006 *
2007 * These addresses are mapped as follows:
2008 *
2009 * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1
2010 * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1
2011 * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1
2012 * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1
2013 * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1
2014 * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1
2015 * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1
2016 * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1
2017 * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2
2018 * Mode Independent Scratch Pages 0-2
2019 * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3
2020 * Mode Independent Scratch Page 3
2021 * A80h-AFFh Mode Independent Scratch Pages 4-7
2022 * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2
2023 * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2
2024 * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2
2025 * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2
2026 * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2
2027 * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2
2028 * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2
2029 * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2
2030 */
2031
2032/* General macros */
2033#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */
2034
2035/* All macros start with offsets from base + 0x800 (CMAPPEDSCR).
2036 * Mode dependent scratch page 0, mode 0. For modes 1-7 you have to
2037 * do arithmetic, as the sketch after these macros illustrates. */
2038#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000)
2039#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002)
2040#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004)
2041#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006)
2042#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008)
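/* Illustrative sketch (hypothetical macros, not part of the original
 * header): given the 800h-BFFh map documented above, the mode
 * dependent addresses for modes 1-7 follow from the mode 0 offsets:
 * pages 0-1 of mode m start 0x40 * m past CMAPPEDSCR, and page 2 of
 * mode m starts at CMAPPEDSCR + 0x300 + 0x20 * m.
 */
#define CSEQ_MDP_PAGE01(mode, offs) (CMAPPEDSCR + 0x40 * (mode) + (offs))
#define CSEQ_MDP_PAGE2(mode, offs) (CMAPPEDSCR + 0x300 + 0x20 * (mode) + (offs))
/* e.g. the mode 3 copy of Q_LINK_HEAD would be CSEQ_MDP_PAGE01(3, 0x0004). */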
2043
2044/* Mode dependent scratch page 0 mode 8 macros. */
2045#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200)
2046#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202)
2047#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204)
2048#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206)
2049#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208)
2050#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A)
2051#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C)
2052#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E)
2053#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210)
2054#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212)
2055#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213)
2056#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A)
2057#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C)
2058#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E)
2059
2060/* Mode dependent scratch page 1 mode 8 macros. */
2061#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220)
2062#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228)
2063
2064/* Mode dependent scratch page 2 mode 8 macros */
2065#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240)
2066#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248)
2067#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250)
2068#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254)
2069
2070/* Mode independent scratch page 4 macros. */
2071#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280)
2072#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282)
2073#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284)
2074#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286)
2075#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288)
2076#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A)
2077#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C)
2078#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E)
2079#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290)
2080#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292)
2081#define CSEQ_REG0 (CMAPPEDSCR + 0x0294)
2082#define CSEQ_REG1 (CMAPPEDSCR + 0x0296)
2083#define CSEQ_REG2 (CMAPPEDSCR + 0x0298)
2084#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C)
2085#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D)
2086#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E)
2087
2088/* Mode independent scratch page 5 macros. */
2089#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0)
2090#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8)
2091#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0)
2092#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2)
2093#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4)
2094#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6)
2095#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7)
2096#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8)
2097
2098/* Mode independent scratch page 6 macros. */
2099#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0)
2100#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2)
2101#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4)
2102#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6)
2103#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7)
2104#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8)
2105#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA)
2106#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0)
2107#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2)
2108#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5)
2109#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6)
2110#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8)
2111#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA)
2112#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC)
2113
2114/* Mode independent scratch page 7 macros. */
2115#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0)
2116#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8)
2117#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0)
2118#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2)
2119#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4)
2120#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6)
2121#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7)
2122#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8)
2123#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA)
2124#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC)
2125
2126/***************************************************************************
2127* Link m Sequencer scratch RAM is 512 bytes.
2128* This scratch memory is divided into mode dependent and mode
2129* independent scratch with this memory further subdivided into
2130* pages of size 32 bytes. There are 4 pages (128 bytes) of
2131* mode independent scratch and 4 pages of dependent scratch
2132* memory for modes 0-2 (384 bytes).
2133*
2134* The host accesses this scratch in a different manner from the
2135* link sequencer. The sequencer has to use LSEQ registers
2136* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat
2137* mapping of the scratch memory is available for software
2138* convenience and to prevent corruption while the sequencer is
2139* running. This memory is mapped onto addresses 800h - 9FFh.
2140*
2141* These addresses are mapped as follows:
2142*
2143* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2
2144* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3
2145* Mode Dependent Scratch Mode 5 Page 0
2146* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2
2147* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3
2148* Mode Dependent Scratch Mode 5 Page 1
2149* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2
2150* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3
2151* Mode Dependent Scratch Mode 5 Page 2
2152* 980h-9DFh Mode Independent Scratch Pages 0-3
2153* 9E0h-9FFh Mode Independent Scratch Page 3
2154* Mode Dependent Scratch Mode 5 Page 3
2155*
2156****************************************************************************/
2157/* General macros */
2158#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */
2159#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */
2160#define LSEQ_MODE5_PAGE0_OFFSET 0x60
2161
2162/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */
2163/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2 (see the sketch below). */
2164#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000)
2165#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002)
2166#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004)
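/* Hypothetical illustration (not part of the original header) of the
 * indexing above: the mode m copy (m = 0, 1, 2) of a common page 0
 * macro lives LSEQ_MODE_SCRATCH_SIZE * m past the mode 0 address;
 * mode 5's page 0 instead lives at
 * LmSCRATCH(LinkNum) + LSEQ_MODE5_PAGE0_OFFSET.
 */
#define LmSEQ_MDP0(LinkNum, mode, offs) \
	(LmSCRATCH(LinkNum) + LSEQ_MODE_SCRATCH_SIZE * (mode) + (offs))
/* e.g. the mode 1 RET_ADDR would be LmSEQ_MDP0(LinkNum, 1, 0x0000). */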
2167
2168/* Mode flag macros (byte 0) */
2169#define SAS_SAVECTX_OCCURRED 0x80
2170#define SAS_OOBSVC_OCCURRED 0x40
2171#define SAS_OOB_DEVICE_PRESENT 0x20
2172#define SAS_CFGHDR_OCCURRED 0x10
2173#define SAS_RCV_INTS_ARE_DISABLED 0x08
2174#define SAS_OOB_HOT_PLUG_CNCT 0x04
2175#define SAS_AWAIT_OPEN_CONNECTION 0x02
2176#define SAS_CFGCMPLT_OCCURRED 0x01
2177
2178/* Mode flag macros (byte 1) */
2179#define SAS_RLSSCB_OCCURRED 0x80
2180#define SAS_FORCED_HEADER_MISS 0x40
2181
2182#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006)
2183#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008)
2184#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B)
2185#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C)
2186
2187/* Mode dependent scratch page 0 macros for mode 0 (non-common) */
2188/* Absolute offsets */
2189#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E)
2190#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010)
2191#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012)
2192#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014)
2193#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016)
2194#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A)
2195#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B)
2196#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C)
2197#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D)
2198#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E)
2199
2200/* Mode dependent scratch page 0 macros for mode 1 (non-common) */
2201/* Absolute offsets */
2202#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E)
2203#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090)
2204#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092)
2205#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A)
2206#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B)
2207#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C)
2208#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x009D)
2209#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E)
2210
2211/* Mode dependent scratch page 0 macros for mode 2 (non-common) */
2212#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E)
2213#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110)
2214#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112)
2215#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114)
2216#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116)
2217#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A)
2218
2219/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */
2220/* Absolute offsets */
2221#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E)
2222#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F)
2223#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070)
2224#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072)
2225#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073)
2226#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074)
2227#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075)
2228#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076)
2229#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x007A)
2230#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C)
2231#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E)
2232
2233/* Mode dependent scratch page 1, mode 0 and mode 1 */
2234#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020)
2235#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030)
2236#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0)
2237#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0)
2238
2239/* Mode dependent scratch page 1 macros for mode 2 */
2240/* Absolute offsets */
2241#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120)
2242#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124)
2243#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128)
2244
2245/* Mode dependent scratch page 1 macros for mode 4/5 */
2246#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0)
2247#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1)
2248#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4)
2249#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5)
2250#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB)
2251#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0)
2252#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2)
2253#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4)
2254
2255/* Mode dependent scratch page 2 macros for mode 0 */
2256/* Absolute offsets */
2257#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040)
2258#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B)
2259#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C)
2260#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E)
2261#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F)
2262
2263/* Mode dependent scratch page 2 macros for mode 1 */
2264/* Absolute offsets */
2265/* byte 0 bits 1-0 are domain select. */
2266#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0)
2267#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8)
2268#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC)
2269#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4)
2270
2271/* Mode dependent scratch page 2 macros for mode 2 */
2272/* Absolute offsets */
2273#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140)
2274#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144)
2275#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148)
2276#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C)
2277#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \
2278 (LmSCRATCH(LinkNum) + 0x0150)
2279#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154)
2280
2281/* Mode dependent scratch page 2 macros for mode 5 */
2282#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160)
2283#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164)
2284#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168)
2285#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C)
2286
2287/* Mode dependent scratch page 3 macros for modes 0 and 1 */
2288/* None defined */
2289
2290/* Mode dependent scratch page 3 macros for modes 2 and 5 */
2291/* None defined */
2292
2293/* Mode Independent Scratch page 0 macros. */
2294#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180)
2295#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182)
2296#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186)
2297#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187)
2298/*
2299 * Currently only bit 0, SAS_DWSAQD, is used.
2300 */
2301#define SAS_DWSAQD 0x01 /*
2302 * DWSSTATUS: DWSAQD
2303 * bit last read in ISR.
2304 */
2305#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188)
2306/* Connection states (byte 0) */
2307#define SAS_WE_OPENED_CS 0x01
2308#define SAS_DEVICE_OPENED_CS 0x02
2309#define SAS_WE_SENT_DONE_CS 0x04
2310#define SAS_DEVICE_SENT_DONE_CS 0x08
2311#define SAS_WE_SENT_CLOSE_CS 0x10
2312#define SAS_DEVICE_SENT_CLOSE_CS 0x20
2313#define SAS_WE_SENT_BREAK_CS 0x40
2314#define SAS_DEVICE_SENT_BREAK_CS 0x80
2315/* Connection states (byte 1) */
2316#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01
2317#define SAS_AIP_RECEIVED_CS 0x02
2318#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04
2319#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08
2320#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10
2321#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20
2322/* Connection states (byte 2) */
2323#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01
2324#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02
2325#define SAS_DEVICE_SENT_DMAT_CS 0x04
2326#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08
2327#define SAS_CLEARING_AFFILIATION_CS 0x20
2328#define SAS_RXTASK_ACTIVE_CS 0x40
2329#define SAS_TXTASK_ACTIVE_CS 0x80
2330/* Connection states (byte 3) */
2331#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01
2332#define SAS_DWS_TIMER_EXPIRED_CS 0x02
2333#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04
2334#define SAS_PHY_DISABLED_CS 0x08
2335#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10
2336#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20
2337#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40
2338#define SAS_DEVICE_SENT_REG_FIS_CS 0x40
2339#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80
2340#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\
2341 SAS_DWS_TIMER_EXPIRED_CS |\
2342 SAS_LINK_RESET_NOT_COMPLETE_CS|\
2343 SAS_PHY_DISABLED_CS)
2344
2345#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\
2346 SAS_PHY_EVENT_TASK_ACTIVE_CS |\
2347 SAS_DEVICE_SENT_ID_FRAME_CS |\
2348 SAS_DEVICE_SENT_HARD_RESET_CS)
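/* Minimal usage sketch (hypothetical helper, not in the original
 * header): byte 3 of LmSEQ_CONNECTION_STATE, once read by the host,
 * can be tested against the composite masks above.
 */
static inline int lseq_phy_is_down(u8 state_byte3)
{
	return (state_byte3 & SAS_PHY_IS_DOWN_FLAGS) != 0;
}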
2349
2350#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C)
2351#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E)
2352#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F)
2353#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192)
2354#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194)
2355#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196)
2356#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198)
2357
2358/* Mode independent scratch page 1 macros. */
2359#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0)
2360#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2)
2361#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4)
2362#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6)
2363#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8)
2364#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9)
2365#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA)
2366#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB)
2367#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC)
2368#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD)
2369#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE)
2370#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8)
2371#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC)
2372#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE)
2373
2374/* Mode independent scratch page 2 macros. */
2375#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0)
2376#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2)
2377#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4)
2378#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6)
2379#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8)
2380#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9)
2381#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA)
2382#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB)
2383#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC)
2384#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD)
2385#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE)
2386#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4)
2387
2388/* Mode independent scratch page 3 macros. */
2389#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0)
2390#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4)
2391#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8)
2392#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC)
2393#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0)
2394#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4)
2395#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8)
2396#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC)
2397
2398#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
new file mode 100644
index 000000000000..64d231712345
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -0,0 +1,785 @@
1/*
2 * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_SAS_H_
28#define _AIC94XX_SAS_H_
29
30#include <scsi/libsas.h>
31
32/* ---------- DDBs ---------- */
33/* DDBs are device descriptor blocks which describe a device in the
34 * domain for which this sequencer can maintain low-level connections
35 * on our behalf. They are 64 bytes each.
36 */
37
38struct asd_ddb_ssp_smp_target_port {
39 u8 conn_type; /* byte 0 */
40#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */
41
42 u8 conn_rate;
43 __be16 init_conn_tag;
44 u8 dest_sas_addr[8]; /* bytes 4-11 */
45
46 __le16 send_queue_head;
47 u8 sq_suspended;
48 u8 ddb_type; /* DDB_TYPE_TARGET */
49#define DDB_TYPE_UNUSED 0xFF
50#define DDB_TYPE_TARGET 0xFE
51#define DDB_TYPE_INITIATOR 0xFD
52#define DDB_TYPE_PM_PORT 0xFC
53
54 __le16 _r_a;
55 __be16 awt_def;
56
57 u8 compat_features; /* byte 20 */
58 u8 pathway_blocked_count;
59 __be16 arb_wait_time;
60 __be32 more_compat_features; /* byte 24 */
61
62 u8 conn_mask;
63 u8 flags; /* concurrent conn:2,2 and open:0(1) */
64#define CONCURRENT_CONN_SUPP 0x04
65#define OPEN_REQUIRED 0x01
66
67 u16 _r_b;
68 __le16 exec_queue_tail;
69 __le16 send_queue_tail;
70 __le16 sister_ddb;
71
72 __le16 _r_c;
73
74 u8 max_concurrent_conn;
75 u8 num_concurrent_conn;
76 u8 num_contexts;
77
78 u8 _r_d;
79
80 __le16 active_task_count;
81
82 u8 _r_e[9];
83
84 u8 itnl_reason; /* I_T nexus loss reason */
85
86 __le16 _r_f;
87
88 __le16 itnl_timeout;
89#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */
90
91 __le32 itnl_timestamp;
92} __attribute__ ((packed));
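/* Illustrative compile-time check (hypothetical, not in the original
 * header): the sequencer expects every DDB to be exactly 64 bytes, so
 * a packing regression in the layout above should fail the build.
 */
static inline void asd_ddb_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct asd_ddb_ssp_smp_target_port) != 64);
}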
93
94struct asd_ddb_stp_sata_target_port {
95 u8 conn_type; /* byte 0 */
96 u8 conn_rate;
97 __be16 init_conn_tag;
98 u8 dest_sas_addr[8]; /* bytes 4-11 */
99
100 __le16 send_queue_head;
101 u8 sq_suspended;
102 u8 ddb_type; /* DDB_TYPE_TARGET */
103
104 __le16 _r_a;
105
106 __be16 awt_def;
107 u8 compat_features; /* byte 20 */
108 u8 pathway_blocked_count;
109 __be16 arb_wait_time;
110 __be32 more_compat_features; /* byte 24 */
111
112 u8 conn_mask;
113 u8 flags; /* concurrent conn:2,2 and open:0(1) */
114#define SATA_MULTIPORT 0x80
115#define SUPPORTS_AFFIL 0x40
116#define STP_AFFIL_POL 0x20
117
118 u8 _r_b;
119 u8 flags2; /* STP close policy:0 */
120#define STP_CL_POL_NO_TX 0x00
121#define STP_CL_POL_BTW_CMDS 0x01
122
123 __le16 exec_queue_tail;
124 __le16 send_queue_tail;
125 __le16 sister_ddb;
126 __le16 ata_cmd_scbptr;
127 __le32 sata_tag_alloc_mask;
128 __le16 active_task_count;
129 __le16 _r_c;
130 __le32 sata_sactive;
131 u8 num_sata_tags;
132 u8 sata_status;
133 u8 sata_ending_status;
134 u8 itnl_reason; /* I_T nexus loss reason */
135 __le16 ncq_data_scb_ptr;
136 __le16 itnl_timeout;
137 __le32 itnl_timestamp;
138} __attribute__ ((packed));
139
140/* This struct asd_ddb_init_port describes the device descriptor block
141 * of an initiator port (when the sequencer is operating in target mode).
142 * Bytes [0,11] and [20,27] are from the OPEN address frame.
143 * The sequencer allocates an initiator port DDB entry.
144 */
145struct asd_ddb_init_port {
146 u8 conn_type; /* byte 0 */
147 u8 conn_rate;
148 __be16 init_conn_tag; /* BE */
149 u8 dest_sas_addr[8];
150 __le16 send_queue_head; /* LE, byte 12 */
151 u8 sq_suspended;
152 u8 ddb_type; /* DDB_TYPE_INITIATOR */
153 __le16 _r_a;
154 __be16 awt_def; /* BE */
155 u8 compat_features;
156 u8 pathway_blocked_count;
157 __be16 arb_wait_time; /* BE */
158 __be32 more_compat_features; /* BE */
159 u8 conn_mask;
160 u8 flags; /* == 5 */
161 u16 _r_b;
162 __le16 exec_queue_tail; /* execution queue tail */
163 __le16 send_queue_tail;
164 __le16 sister_ddb;
165 __le16 init_resp_timeout; /* initiator response timeout */
166 __le32 _r_c;
167 __le16 active_tasks; /* active task count */
168 __le16 init_list; /* initiator list link pointer */
169 __le32 _r_d;
170 u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */
171 u8 itnl_reason; /* I_T nexus loss reason */
172 __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */
173 __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */
174 __le32 itnl_timestamp;
175} __attribute__ ((packed));
176
177/* This struct asd_ddb_sata_tag describes a look-up table to be used
178 * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8:
179 * NCQ support. This table is used by the sequencers to find the
180 * corresponding SCB, given a SATA II tag value.
181 */
182struct asd_ddb_sata_tag {
183 __le16 scb_pointer[32];
184} __attribute__ ((packed));
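/* Hypothetical lookup following the comment above (helper not in the
 * original header): given a SATA II NCQ tag value, fetch the
 * corresponding SCB pointer from the 32-entry table.
 */
static inline u16 asd_sata_tag_to_scb(const struct asd_ddb_sata_tag *ddb,
				      u8 tag)
{
	return le16_to_cpu(ddb->scb_pointer[tag & 0x1F]);
}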
185
186/* This struct asd_ddb_sata_pm_table describes a port number to
187 * connection handle look-up table. SATA targets attached to a port
188 * multiplier require a 4-bit port number value. There is one DDB
189 * entry of this type for each SATA port multiplier (sister DDB).
190 * Given a SATA PM port number, this table gives us the SATA PM Port
191 * DDB of the SATA port multiplier port (i.e. the SATA target
192 * discovered on the port).
193 */
194struct asd_ddb_sata_pm_table {
195 __le16 ddb_pointer[16];
196 __le16 _r_a[16];
197} __attribute__ ((packed));
198
199/* This struct asd_ddb_sata_pm_port describes the SATA port multiplier
200 * port format DDB.
201 */
202struct asd_ddb_sata_pm_port {
203 u8 _r_a[15];
204 u8 ddb_type;
205 u8 _r_b[13];
206 u8 pm_port_flags;
207#define PM_PORT_MASK 0xF0
208#define PM_PORT_SET 0x02
209 u8 _r_c[6];
210 __le16 sister_ddb;
211 __le16 ata_cmd_scbptr;
212 __le32 sata_tag_alloc_mask;
213 __le16 active_task_count;
214 __le16 parent_ddb;
215 __le32 sata_sactive;
216 u8 num_sata_tags;
217 u8 sata_status;
218 u8 sata_ending_status;
219 u8 _r_d[9];
220} __attribute__ ((packed));
221
222/* This struct asd_ddb_seq_shared describes a DDB shared by the
223 * central and link sequencers. port_map_by_links is indexed by phy
224 * number [0,7]; each byte is a bit mask of all the phys that are in
225 * the same port as the indexed phy.
226 */
227struct asd_ddb_seq_shared {
228 __le16 q_free_ddb_head;
229 __le16 q_free_ddb_tail;
230 __le16 q_free_ddb_cnt;
231 __le16 q_used_ddb_head;
232 __le16 q_used_ddb_tail;
233 __le16 shared_mem_lock;
234 __le16 smp_conn_tag;
235 __le16 est_nexus_buf_cnt;
236 __le16 est_nexus_buf_thresh;
237 u32 _r_a;
238 u8 settable_max_contexts;
239 u8 _r_b[23];
240 u8 conn_not_active;
241 u8 phy_is_up;
242 u8 _r_c[8];
243 u8 port_map_by_links[8];
244} __attribute__ ((packed));
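/* Sketch of the port_map_by_links convention just described
 * (hypothetical helper): phys a and b are in the same port iff bit b
 * is set in the byte indexed by a.
 */
static inline int asd_phys_in_same_port(const struct asd_ddb_seq_shared *s,
					int a, int b)
{
	return (s->port_map_by_links[a] & (1 << b)) != 0;
}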
245
246/* ---------- SG Element ---------- */
247
248/* This struct sg_el describes the hardware scatter gather buffer
249 * element. All entries are little endian. An SCB contains 2 of
250 * these, plus one more, called a link element, which indicates a
251 * sublist if one is needed.
252 *
253 * A link element has only the bus address set and the flags (DS) bits
254 * valid. The bus address points to the start of the sublist.
255 *
256 * If a sublist is needed, then that sublist should also include the 2
257 * sg_el embedded in the SCB, in which case next_sg_offs is 32,
258 * since sizeof(struct sg_el) = 16; EOS should be 1 and EOL 0 here.
259 */
260struct sg_el {
261 __le64 bus_addr;
262 __le32 size;
263 __le16 _r;
264 u8 next_sg_offs;
265 u8 flags;
266#define ASD_SG_EL_DS_MASK 0x30
267#define ASD_SG_EL_DS_OCM 0x10
268#define ASD_SG_EL_DS_HM 0x00
269#define ASD_SG_EL_LIST_MASK 0xC0
270#define ASD_SG_EL_LIST_EOL 0x40
271#define ASD_SG_EL_LIST_EOS 0x80
272} __attribute__ ((packed));
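/* Minimal sketch of building the link element described above
 * (hypothetical helper, not in the original header): only the bus
 * address and the DS bits are meaningful, and the address points at
 * the start of the sublist.
 */
static inline void asd_init_link_sg_el(struct sg_el *link, dma_addr_t sublist)
{
	memset(link, 0, sizeof(*link));
	link->bus_addr = cpu_to_le64((u64)sublist);
	link->flags = ASD_SG_EL_DS_HM; /* sublist resides in host memory */
}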
273
274/* ---------- SCBs ---------- */
275
276/* An SCB (sequencer control block) is comprised of a common header
277 * and a task part, for a total of 128 bytes. All fields are in LE
278 * order, unless otherwise noted.
279 */
280
281/* This struct scb_header defines the SCB header format.
282 */
283struct scb_header {
284 __le64 next_scb;
285 __le16 index; /* transaction context */
286 u8 opcode;
287} __attribute__ ((packed));
288
289/* SCB opcodes: Execution queue
290 */
291#define INITIATE_SSP_TASK 0x00
292#define INITIATE_LONG_SSP_TASK 0x01
293#define INITIATE_BIDIR_SSP_TASK 0x02
294#define ABORT_TASK 0x03
295#define INITIATE_SSP_TMF 0x04
296#define SSP_TARG_GET_DATA 0x05
297#define SSP_TARG_GET_DATA_GOOD 0x06
298#define SSP_TARG_SEND_RESP 0x07
299#define QUERY_SSP_TASK 0x08
300#define INITIATE_ATA_TASK 0x09
301#define INITIATE_ATAPI_TASK 0x0a
302#define CONTROL_ATA_DEV 0x0b
303#define INITIATE_SMP_TASK 0x0c
304#define SMP_TARG_SEND_RESP 0x0f
305
306/* SCB opcodes: Send Queue
307 */
308#define SSP_TARG_SEND_DATA 0x40
309#define SSP_TARG_SEND_DATA_GOOD 0x41
310
311/* SCB opcodes: Link Queue
312 */
313#define CONTROL_PHY 0x80
314#define SEND_PRIMITIVE 0x81
315#define INITIATE_LINK_ADM_TASK 0x82
316
317/* SCB opcodes: other
318 */
319#define EMPTY_SCB 0xc0
320#define INITIATE_SEQ_ADM_TASK 0xc1
321#define EST_ICL_TARG_WINDOW 0xc2
322#define COPY_MEM 0xc3
323#define CLEAR_NEXUS 0xc4
324#define INITIATE_DDB_ADM_TASK 0xc6
325#define ESTABLISH_NEXUS_ESCB 0xd0
326
327#define LUN_SIZE 8
328
329/* See SAS spec, task IU
330 */
331struct ssp_task_iu {
332 u8 lun[LUN_SIZE]; /* BE */
333 u16 _r_a;
334 u8 tmf;
335 u8 _r_b;
336 __be16 tag; /* BE */
337 u8 _r_c[14];
338} __attribute__ ((packed));
339
340/* See SAS spec, command IU
341 */
342struct ssp_command_iu {
343 u8 lun[LUN_SIZE];
344 u8 _r_a;
345 u8 efb_prio_attr; /* enable first burst, task prio & attr */
346#define EFB_MASK 0x80
347#define TASK_PRIO_MASK 0x78
348#define TASK_ATTR_MASK 0x07
349
350 u8 _r_b;
351 u8 add_cdb_len; /* in dwords, since bit 0,1 are reserved */
352 union {
353 u8 cdb[16];
354 struct {
355 __le64 long_cdb_addr; /* bus address, LE */
356 __le32 long_cdb_size; /* LE */
357 u8 _r_c[3];
358 u8 eol_ds; /* eol:6,6, ds:5,4 */
359 } long_cdb; /* sequencer extension */
360 };
361} __attribute__ ((packed));
362
363struct xfer_rdy_iu {
364 __be32 requested_offset; /* BE */
365 __be32 write_data_len; /* BE */
366 __be32 _r_a;
367} __attribute__ ((packed));
368
369/* ---------- SCB tasks ---------- */
370
371/* This is both ssp_task and long_ssp_task
372 */
373struct initiate_ssp_task {
374 u8 proto_conn_rate; /* proto:6,4, conn_rate:3,0 */
375 __le32 total_xfer_len;
376 struct ssp_frame_hdr ssp_frame;
377 struct ssp_command_iu ssp_cmd;
378 __le16 sister_scb; /* 0xFFFF */
379 __le16 conn_handle; /* index to DDB for the intended target */
380 u8 data_dir; /* :1,0 */
381#define DATA_DIR_NONE 0x00
382#define DATA_DIR_IN 0x01
383#define DATA_DIR_OUT 0x02
384#define DATA_DIR_BYRECIPIENT 0x03
385
386 u8 _r_a;
387 u8 retry_count;
388 u8 _r_b[5];
389 struct sg_el sg_element[3]; /* 2 real and 1 link */
390} __attribute__ ((packed));
391
392/* This defines both ata_task and atapi_task.
393 * ata: C bit of FIS should be 1,
394 * atapi: C bit of FIS should be 1, and command register should be 0xA0,
395 * to indicate a packet command.
396 */
397struct initiate_ata_task {
398 u8 proto_conn_rate;
399 __le32 total_xfer_len;
400 struct host_to_dev_fis fis;
401 __le32 data_offs;
402 u8 atapi_packet[16];
403 u8 _r_a[12];
404 __le16 sister_scb;
405 __le16 conn_handle;
406 u8 ata_flags; /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */
407#define CSMI_TASK 0x40
408#define DATA_XFER_MODE_DMA 0x10
409#define ATA_Q_TYPE_MASK 0x08
410#define ATA_Q_TYPE_UNTAGGED 0x00
411#define ATA_Q_TYPE_NCQ 0x08
412
413 u8 _r_b;
414 u8 retry_count;
415 u8 _r_c;
416 u8 flags;
417#define STP_AFFIL_POLICY 0x20
418#define SET_AFFIL_POLICY 0x10
419#define RET_PARTIAL_SGLIST 0x02
420
421 u8 _r_d[3];
422 struct sg_el sg_element[3];
423} __attribute__ ((packed));
424
425struct initiate_smp_task {
426 u8 proto_conn_rate;
427 u8 _r_a[40];
428 struct sg_el smp_req;
429 __le16 sister_scb;
430 __le16 conn_handle;
431 u8 _r_c[8];
432 struct sg_el smp_resp;
433 u8 _r_d[32];
434} __attribute__ ((packed));
435
436struct control_phy {
437 u8 phy_id;
438 u8 sub_func;
439#define DISABLE_PHY 0x00
440#define ENABLE_PHY 0x01
441#define RELEASE_SPINUP_HOLD 0x02
442#define ENABLE_PHY_NO_SAS_OOB 0x03
443#define ENABLE_PHY_NO_SATA_OOB 0x04
444#define PHY_NO_OP 0x05
445#define EXECUTE_HARD_RESET 0x81
446
447 u8 func_mask;
448 u8 speed_mask;
449 u8 hot_plug_delay;
450 u8 port_type;
451 u8 flags;
452#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01
453#define DISABLE_PHY_IF_OOB_FAILS 0x02
454
455 __le32 timeout_override;
456 u8 link_reset_retries;
457 u8 _r_a[47];
458 __le16 conn_handle;
459 u8 _r_b[56];
460} __attribute__ ((packed));
461
462struct control_ata_dev {
463 u8 proto_conn_rate;
464 __le32 _r_a;
465 struct host_to_dev_fis fis;
466 u8 _r_b[32];
467 __le16 sister_scb;
468 __le16 conn_handle;
469 u8 ata_flags; /* 0 */
470 u8 _r_c[55];
471} __attribute__ ((packed));
472
473struct empty_scb {
474 u8 num_valid;
475 __le32 _r_a;
476#define ASD_EDBS_PER_SCB 7
477/* header+data+CRC+DMA suffix data */
478#define ASD_EDB_SIZE (24+1024+4+16)
479 struct sg_el eb[ASD_EDBS_PER_SCB];
480#define ELEMENT_NOT_VALID 0xC0
481} __attribute__ ((packed));
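/* Worked size, from the breakdown above: ASD_EDB_SIZE = 24 + 1024 + 4
 * + 16 = 1068 bytes per empty data buffer, and one empty SCB describes
 * ASD_EDBS_PER_SCB = 7 of them.
 */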
482
483struct initiate_link_adm {
484 u8 phy_id;
485 u8 sub_func;
486#define GET_LINK_ERROR_COUNT 0x00
487#define RESET_LINK_ERROR_COUNT 0x01
488#define ENABLE_NOTIFY_SPINUP_INTS 0x02
489
490 u8 _r_a[57];
491 __le16 conn_handle;
492 u8 _r_b[56];
493} __attribute__ ((packed));
494
495struct copy_memory {
496 u8 _r_a;
497 __le16 xfer_len;
498 __le16 _r_b;
499 __le64 src_busaddr;
500 u8 src_ds; /* See definition of sg_el */
501 u8 _r_c[45];
502 __le16 conn_handle;
503 __le64 _r_d;
504 __le64 dest_busaddr;
505 u8 dest_ds; /* See definition of sg_el */
506 u8 _r_e[39];
507} __attribute__ ((packed));
508
509struct abort_task {
510 u8 proto_conn_rate;
511 __le32 _r_a;
512 struct ssp_frame_hdr ssp_frame;
513 struct ssp_task_iu ssp_task;
514 __le16 sister_scb;
515 __le16 conn_handle;
516 u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */
517#define SUSPEND_DATA_TRANS 0x04
518
519 u8 _r_b;
520 u8 retry_count;
521 u8 _r_c[5];
522 __le16 index; /* Transaction context of task to be queried */
523 __le16 itnl_to;
524 u8 _r_d[44];
525} __attribute__ ((packed));
526
527struct clear_nexus {
528 u8 nexus;
529#define NEXUS_ADAPTER 0x00
530#define NEXUS_PORT 0x01
531#define NEXUS_I_T 0x02
532#define NEXUS_I_T_L 0x03
533#define NEXUS_TAG 0x04
534#define NEXUS_TRANS_CX 0x05
535#define NEXUS_SATA_TAG 0x06
536#define NEXUS_T_L 0x07
537#define NEXUS_L 0x08
538#define NEXUS_T_TAG 0x09
539
540 __le32 _r_a;
541 u8 flags;
542#define SUSPEND_TX 0x80
543#define RESUME_TX 0x40
544#define SEND_Q 0x04
545#define EXEC_Q 0x02
546#define NOTINQ 0x01
547
548 u8 _r_b[3];
549 u8 conn_mask;
550 u8 _r_c[19];
551 struct ssp_task_iu ssp_task; /* LUN and TAG */
552 __le16 _r_d;
553 __le16 conn_handle;
554 __le64 _r_e;
555 __le16 index; /* Transaction context of task to be cleared */
556 __le16 context; /* Clear nexus context */
557 u8 _r_f[44];
558} __attribute__ ((packed));
559
560struct initiate_ssp_tmf {
561 u8 proto_conn_rate;
562 __le32 _r_a;
563 struct ssp_frame_hdr ssp_frame;
564 struct ssp_task_iu ssp_task;
565 __le16 sister_scb;
566 __le16 conn_handle;
567 u8 flags; /* itnl override and suspend data tx */
568#define OVERRIDE_ITNL_TIMER 8
569
570 u8 _r_b;
571 u8 retry_count;
572 u8 _r_c[5];
573 __le16 index; /* Transaction context of task to be queried */
574 __le16 itnl_to;
575 u8 _r_d[44];
576} __attribute__ ((packed));
577
578/* Transmits an arbitrary primitive on the link.
579 * Used for NOTIFY and BROADCAST.
580 */
581struct send_prim {
582 u8 phy_id;
583 u8 wait_transmit; /* :0,0 */
584 u8 xmit_flags;
585#define XMTPSIZE_MASK 0xF0
586#define XMTPSIZE_SINGLE 0x10
587#define XMTPSIZE_REPEATED 0x20
588#define XMTPSIZE_CONT 0x20
589#define XMTPSIZE_TRIPLE 0x30
590#define XMTPSIZE_REDUNDANT 0x60
591#define XMTPSIZE_INF 0
592
593#define XMTCONTEN 0x04
594#define XMTPFRM 0x02 /* Transmit at the next frame boundary */
595#define XMTPIMM 0x01 /* Transmit immediately */
596
597 __le16 _r_a;
598 u8 prim[4]; /* K, D0, D1, D2 */
599 u8 _r_b[50];
600 __le16 conn_handle;
601 u8 _r_c[56];
602} __attribute__ ((packed));
603
604/* This describes both SSP Target Get Data and SSP Target Get Data And
605 * Send Good Response SCBs. Used when the sequencer is operating in
606 * target mode.
607 */
608struct ssp_targ_get_data {
609 u8 proto_conn_rate;
610 __le32 total_xfer_len;
611 struct ssp_frame_hdr ssp_frame;
612 struct xfer_rdy_iu xfer_rdy;
613 u8 lun[LUN_SIZE];
614 __le64 _r_a;
615 __le16 sister_scb;
616 __le16 conn_handle;
617 u8 data_dir; /* 01b */
618 u8 _r_b;
619 u8 retry_count;
620 u8 _r_c[5];
621 struct sg_el sg_element[3];
622} __attribute__ ((packed));
623
624/* ---------- The actual SCB struct ---------- */
625
626struct scb {
627 struct scb_header header;
628 union {
629 struct initiate_ssp_task ssp_task;
630 struct initiate_ata_task ata_task;
631 struct initiate_smp_task smp_task;
632 struct control_phy control_phy;
633 struct control_ata_dev control_ata_dev;
634 struct empty_scb escb;
635 struct initiate_link_adm link_adm;
636 struct copy_memory cp_mem;
637 struct abort_task abort_task;
638 struct clear_nexus clear_nexus;
639 struct initiate_ssp_tmf ssp_tmf;
640 };
641} __attribute__ ((packed));
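/* Illustrative compile-time check (hypothetical): the SCB is documented
 * above as a common header plus a task part totalling 128 bytes.
 */
static inline void asd_scb_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct scb) != 128);
}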
642
643/* ---------- Done List ---------- */
644/* The done list entry opcode field is defined below.
645 * The mnemonic encoding and meaning is as follows:
646 * TC - Task Complete, status was received and acknowledged
647 * TF - Task Failed, indicates an error prior to receiving acknowledgment
648 * for the command:
649 * - no conn,
650 * - NACK or R_ERR received in response to this command,
651 * - credit blocked or not available, or in the case of SMP request,
652 * - no SMP response was received.
653 * In these four cases it is known that the target didn't receive the
654 * command.
655 * TI - Task Interrupted, error after the command was acknowledged. It is
656 * known that the command was received by the target.
657 * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK
658 * (R_ERR) was received due to loss of signal, broken connection, loss of
659 * dword sync or other reason. The application client should send the
660 * appropriate task query.
661 * TA - Task Aborted, see TF.
662 * _RESP - The completion includes an empty buffer containing status.
663 * TO - Timeout.
664 */
665#define TC_NO_ERROR 0x00
666#define TC_UNDERRUN 0x01
667#define TC_OVERRUN 0x02
668#define TF_OPEN_TO 0x03
669#define TF_OPEN_REJECT 0x04
670#define TI_BREAK 0x05
671#define TI_PROTO_ERR 0x06
672#define TC_SSP_RESP 0x07
673#define TI_PHY_DOWN 0x08
674#define TF_PHY_DOWN 0x09
675#define TC_LINK_ADM_RESP 0x0a
676#define TC_CSMI 0x0b
677#define TC_ATA_RESP 0x0c
678#define TU_PHY_DOWN 0x0d
679#define TU_BREAK 0x0e
680#define TI_SATA_TO 0x0f
681#define TI_NAK 0x10
682#define TC_CONTROL_PHY 0x11
683#define TF_BREAK 0x12
684#define TC_RESUME 0x13
685#define TI_ACK_NAK_TO 0x14
686#define TF_SMPRSP_TO 0x15
687#define TF_SMP_XMIT_RCV_ERR 0x16
688#define TC_PARTIAL_SG_LIST 0x17
689#define TU_ACK_NAK_TO 0x18
690#define TU_SATA_TO 0x19
691#define TF_NAK_RECV 0x1a
692#define TA_I_T_NEXUS_LOSS 0x1b
693#define TC_ATA_R_ERR_RECV 0x1c
694#define TF_TMF_NO_CTX 0x1d
695#define TA_ON_REQ 0x1e
696#define TF_TMF_NO_TAG 0x1f
697#define TF_TMF_TAG_FREE 0x20
698#define TF_TMF_TASK_DONE 0x21
699#define TF_TMF_NO_CONN_HANDLE 0x22
700#define TC_TASK_CLEARED 0x23
701#define TI_SYNCS_RECV 0x24
702#define TU_SYNCS_RECV 0x25
703#define TF_IRTT_TO 0x26
704#define TF_NO_SMP_CONN 0x27
705#define TF_IU_SHORT 0x28
706#define TF_DATA_OFFS_ERR 0x29
707#define TF_INV_CONN_HANDLE 0x2a
708#define TF_REQUESTED_N_PENDING 0x2b
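/* Hypothetical classifier (not in the original header) following the
 * mnemonic scheme explained above; only a few representative opcodes
 * are shown.
 */
static inline const char *asd_dl_opcode_kind(u8 opcode)
{
	switch (opcode) {
	case TC_NO_ERROR:
	case TC_SSP_RESP:
		return "task complete";
	case TF_OPEN_TO:
	case TF_NAK_RECV:
		return "task failed: target never got the command";
	case TI_BREAK:
	case TI_NAK:
		return "task interrupted: target got the command";
	case TU_BREAK:
	case TU_ACK_NAK_TO:
		return "task unacked: send a task query";
	default:
		return "other";
	}
}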
709
710/* 0xc1 - 0xc7: empty buffer received,
711 * 0xd1 - 0xd7: establish nexus empty buffer received
712 */
713/* This is the ESCB mask */
714#define ESCB_RECVD 0xC0
715
716
717/* This struct done_list_struct defines the done list entry.
718 * All fields are LE.
719 */
720struct done_list_struct {
721 __le16 index; /* aka transaction context */
722 u8 opcode;
723 u8 status_block[4];
724 u8 toggle; /* bit 0 */
725#define DL_TOGGLE_MASK 0x01
726} __attribute__ ((packed));
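/* Minimal consumption sketch (hypothetical helper): a done list entry
 * belongs to the current pass around the ring only while its toggle
 * bit matches the value the driver expects; the expected bit flips on
 * ring wrap-around.
 */
static inline int asd_dl_entry_ready(const struct done_list_struct *dl,
				     u8 expected_toggle)
{
	return (dl->toggle & DL_TOGGLE_MASK) == expected_toggle;
}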
727
728/* ---------- PHYS ---------- */
729
730struct asd_phy {
731 struct asd_sas_phy sas_phy;
732 struct asd_phy_desc *phy_desc; /* hw profile */
733
734 struct sas_identify_frame *identify_frame;
735 struct asd_dma_tok *id_frm_tok;
736
737 u8 frame_rcvd[ASD_EDB_SIZE];
738};
739
740
741#define ASD_SCB_SIZE sizeof(struct scb)
742#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port)
743
744/* Define this to 0 if you do not want NOTIFY (ENABLE SPINUP) sent.
745 * Default: 0x10 (it's a mask)
746 */
747#define ASD_NOTIFY_ENABLE_SPINUP 0x10
748
749/* If enabled, set this to the interval between transmission
750 * of NOTIFY (ENABLE SPINUP). In units of 200 us.
751 */
752#define ASD_NOTIFY_TIMEOUT 2500
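/* Worked example of the unit above, assuming straight multiplication:
 * ASD_NOTIFY_TIMEOUT of 2500 * 200 us = 500 ms between NOTIFY
 * (ENABLE SPINUP) transmissions.
 */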
753
754/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP).
755 * If 0, transmit immediately. In milliseconds.
756 */
757#define ASD_NOTIFY_DOWN_COUNT 0
758
759/* Device present timer timeout constant, 10 ms. */
760#define ASD_DEV_PRESENT_TIMEOUT 0x2710
761
762#define ASD_SATA_INTERLOCK_TIMEOUT 0
763
764/* How long to wait before shutting down an STP connection, unless
765 * an STP target sent frame(s). 50 usec.
766 * IGNORED by the sequencer (i.e. value 0 always).
767 */
768#define ASD_STP_SHUTDOWN_TIMEOUT 0x0
769
770/* ATA soft reset timer timeout. 5 usec. */
771#define ASD_SRST_ASSERT_TIMEOUT 0x05
772
773/* 31 sec */
774#define ASD_RCV_FIS_TIMEOUT 0x01D905C0
775
776#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8
777
778/* COMINIT timer */
779#define ASD_TEN_MILLISEC_TIMEOUT 0x2710
780#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT
781
782/* 1 sec */
783#define ASD_SMP_RCV_TIMEOUT 0x000F4240
784
785#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
new file mode 100644
index 000000000000..7ee49b51b724
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -0,0 +1,758 @@
1/*
2 * Aic94xx SAS/SATA driver SCB management.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28
29#include "aic94xx.h"
30#include "aic94xx_reg.h"
31#include "aic94xx_hwi.h"
32#include "aic94xx_seq.h"
33
34#include "aic94xx_dump.h"
35
36/* ---------- EMPTY SCB ---------- */
37
38#define DL_PHY_MASK 7
39#define BYTES_DMAED 0
40#define PRIMITIVE_RECVD 0x08
41#define PHY_EVENT 0x10
42#define LINK_RESET_ERROR 0x18
43#define TIMER_EVENT 0x20
44#define REQ_TASK_ABORT 0xF0
45#define REQ_DEVICE_RESET 0xF1
46#define SIGNAL_NCQ_ERROR 0xF2
47#define CLEAR_NCQ_ERROR 0xF3
48
49#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \
50 | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
51 | CURRENT_OOB_ERROR)
52
53static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
54{
55 struct sas_phy *sas_phy = phy->sas_phy.phy;
56
57 switch (oob_mode & 7) {
58 case PHY_SPEED_60:
59 /* FIXME: sas transport class doesn't have this */
60 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
61 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
62 break;
63 case PHY_SPEED_30:
64 phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
65 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
66 break;
67 case PHY_SPEED_15:
68 phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
69 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
70 break;
71 }
72 sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
73 sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
74 sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
75 sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate;
76 sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate;
77
78 if (oob_mode & SAS_MODE)
79 phy->sas_phy.oob_mode = SAS_OOB_MODE;
80 else if (oob_mode & SATA_MODE)
81 phy->sas_phy.oob_mode = SATA_OOB_MODE;
82}
83
84static inline void asd_phy_event_tasklet(struct asd_ascb *ascb,
85 struct done_list_struct *dl)
86{
87 struct asd_ha_struct *asd_ha = ascb->ha;
88 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
89 int phy_id = dl->status_block[0] & DL_PHY_MASK;
90 struct asd_phy *phy = &asd_ha->phys[phy_id];
91
92 u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS;
93 u8 oob_mode = dl->status_block[2];
94
95 switch (oob_status) {
96 case CURRENT_LOSS_OF_SIGNAL:
97 /* directly attached device was removed */
98 ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
99 asd_turn_led(asd_ha, phy_id, 0);
100 sas_phy_disconnected(&phy->sas_phy);
101 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
102 break;
103 case CURRENT_OOB_DONE:
104 /* hot plugged device */
105 asd_turn_led(asd_ha, phy_id, 1);
106 get_lrate_mode(phy, oob_mode);
107 ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
108 phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
109 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
110 break;
111 case CURRENT_SPINUP_HOLD:
112 /* hot plug SATA, no COMWAKE sent */
113 asd_turn_led(asd_ha, phy_id, 1);
114 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
115 break;
116 case CURRENT_GTO_TIMEOUT:
117 case CURRENT_OOB_ERROR:
118 ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id,
119 dl->status_block[1]);
120 asd_turn_led(asd_ha, phy_id, 0);
121 sas_phy_disconnected(&phy->sas_phy);
122 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
123 break;
124 }
125}
126
127/* If phys are enabled sparsely, this will do the right thing. */
128static inline unsigned ord_phy(struct asd_ha_struct *asd_ha,
129 struct asd_phy *phy)
130{
131 u8 enabled_mask = asd_ha->hw_prof.enabled_phys;
132 int i, k = 0;
133
134 for_each_phy(enabled_mask, enabled_mask, i) {
135 if (&asd_ha->phys[i] == phy)
136 return k;
137 k++;
138 }
139 return 0;
140}
141
142/**
143 * asd_get_attached_sas_addr -- extract/generate attached SAS address
144 * @phy: pointer to asd_phy
145 * @sas_addr: pointer to buffer where the SAS address is to be written
146 *
147 * This function extracts the SAS address from an IDENTIFY frame
148 * received. If OOB is SATA, then a SAS address is generated from the
149 * HA tables.
150 *
151 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
152 * buffer.
153 */
154static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
155{
156 if (phy->sas_phy.frame_rcvd[0] == 0x34
157 && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
158 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
159 /* FIS device-to-host */
160 u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr);
161
162 addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy);
163 *(__be64 *)sas_addr = cpu_to_be64(addr);
164 } else {
165 struct sas_identify_frame *idframe =
166 (void *) phy->sas_phy.frame_rcvd;
167 memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
168 }
169}
170
171static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
172 struct done_list_struct *dl,
173 int edb_id, int phy_id)
174{
175 unsigned long flags;
176 int edb_el = edb_id + ascb->edb_index;
177 struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
178 struct asd_phy *phy = &ascb->ha->phys[phy_id];
179 struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
180 u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
181
182 size = min(size, (u16) sizeof(phy->frame_rcvd));
183
184 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
185 memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size);
186 phy->sas_phy.frame_rcvd_size = size;
187 asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
188 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
189 asd_dump_frame_rcvd(phy, dl);
190 sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
191}
192
193static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
194 struct done_list_struct *dl,
195 int phy_id)
196{
197 struct asd_ha_struct *asd_ha = ascb->ha;
198 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
199 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
200 u8 lr_error = dl->status_block[1];
201 u8 retries_left = dl->status_block[2];
202
203 switch (lr_error) {
204 case 0:
205 ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id);
206 break;
207 case 1:
208 ASD_DPRINTK("phy%d: Loss of signal\n", phy_id);
209 break;
210 case 2:
211 ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id);
212 break;
213 case 3:
214 ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id);
215 break;
216 default:
217 ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n",
218 phy_id, lr_error);
219 break;
220 }
221
222 asd_turn_led(asd_ha, phy_id, 0);
223 sas_phy_disconnected(sas_phy);
224 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
225
226 if (retries_left == 0) {
227 int num = 1;
228 struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
229 GFP_ATOMIC);
230 if (!cp) {
231 asd_printk("%s: out of memory\n", __FUNCTION__);
232 goto out;
233 }
234 ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
235 phy_id);
236 asd_build_control_phy(cp, phy_id, ENABLE_PHY);
237 if (asd_post_ascb_list(ascb->ha, cp, 1) != 0)
238 asd_ascb_free(cp);
239 }
240out:
241 ;
242}
243
244static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
245 struct done_list_struct *dl,
246 int phy_id)
247{
248 unsigned long flags;
249 struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
250 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
251 u8 reg = dl->status_block[1];
252 u32 cont = dl->status_block[2] << ((reg & 3)*8);
253
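	/* The low two bits of reg select the byte lane within the 32-bit
	 * primitive status register (lane n shifts the payload left by
	 * n*8 bits); the masked register base is then matched below. */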
254 reg &= ~3;
255 switch (reg) {
256 case LmPRMSTAT0BYTE0:
257 switch (cont) {
258 case LmBROADCH:
259 case LmBROADRVCH0:
260 case LmBROADRVCH1:
261 case LmBROADSES:
262 ASD_DPRINTK("phy%d: BROADCAST change received:%d\n",
263 phy_id, cont);
264 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
265 sas_phy->sas_prim = ffs(cont);
266 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
267			sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
268 break;
269
270 case LmUNKNOWNP:
271 ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id);
272 break;
273
274 default:
275 ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
276 phy_id, reg, cont);
277 break;
278 }
279 break;
280 case LmPRMSTAT1BYTE0:
281 switch (cont) {
282 case LmHARDRST:
283 ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n",
284 phy_id);
285 /* The sequencer disables all phys on that port.
286 * We have to re-enable the phys ourselves. */
287 sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
288 break;
289
290 default:
291 ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
292 phy_id, reg, cont);
293 break;
294 }
295 break;
296 default:
297 ASD_DPRINTK("unknown primitive register:0x%x\n",
298 dl->status_block[1]);
299 break;
300 }
301}
302
303/**
304 * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB
305 * @ascb: pointer to Empty SCB
306 * @edb_id: index [0,6] to the empty data buffer which is to be invalidated
307 *
308 * After an EDB has been invalidated, if all EDBs in this ESCB have been
309 * invalidated, the ESCB is posted back to the sequencer.
310 * Context is tasklet/IRQ.
311 */
312void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
313{
314 struct asd_seq_data *seq = &ascb->ha->seq;
315 struct empty_scb *escb = &ascb->scb->escb;
316 struct sg_el *eb = &escb->eb[edb_id];
317 struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id];
318
319 memset(edb->vaddr, 0, ASD_EDB_SIZE);
320 eb->flags |= ELEMENT_NOT_VALID;
321 escb->num_valid--;
322
323 if (escb->num_valid == 0) {
324 int i;
325 /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, "
326 "dma_handle: 0x%08llx, next: 0x%08llx, "
327 "index:%d, opcode:0x%02x\n",
328 ascb->dma_scb.vaddr,
329 (u64)ascb->dma_scb.dma_handle,
330 le64_to_cpu(ascb->scb->header.next_scb),
331 le16_to_cpu(ascb->scb->header.index),
332 ascb->scb->header.opcode);
333 */
334 escb->num_valid = ASD_EDBS_PER_SCB;
335 for (i = 0; i < ASD_EDBS_PER_SCB; i++)
336 escb->eb[i].flags = 0;
337 if (!list_empty(&ascb->list))
338 list_del_init(&ascb->list);
339 i = asd_post_escb_list(ascb->ha, ascb, 1);
340 if (i)
341 asd_printk("couldn't post escb, err:%d\n", i);
342 }
343}
344
345static void escb_tasklet_complete(struct asd_ascb *ascb,
346 struct done_list_struct *dl)
347{
348 struct asd_ha_struct *asd_ha = ascb->ha;
349 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
350 int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */
351 u8 sb_opcode = dl->status_block[0];
352 int phy_id = sb_opcode & DL_PHY_MASK;
353 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
354
355 if (edb > 6 || edb < 0) {
356 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
357 edb, dl->opcode);
358 ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
359 sb_opcode, phy_id);
360 ASD_DPRINTK("escb: vaddr: 0x%p, "
361 "dma_handle: 0x%llx, next: 0x%llx, "
362 "index:%d, opcode:0x%02x\n",
363 ascb->dma_scb.vaddr,
364 (unsigned long long)ascb->dma_scb.dma_handle,
365 (unsigned long long)
366 le64_to_cpu(ascb->scb->header.next_scb),
367 le16_to_cpu(ascb->scb->header.index),
368 ascb->scb->header.opcode);
369 }
370
371 sb_opcode &= ~DL_PHY_MASK;
372
373 switch (sb_opcode) {
374 case BYTES_DMAED:
375 ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
376 asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
377 break;
378 case PRIMITIVE_RECVD:
379 ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
380 phy_id);
381 asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
382 break;
383 case PHY_EVENT:
384 ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
385 asd_phy_event_tasklet(ascb, dl);
386 break;
387 case LINK_RESET_ERROR:
388 ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
389 phy_id);
390 asd_link_reset_err_tasklet(ascb, dl, phy_id);
391 break;
392 case TIMER_EVENT:
393 ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
394 __FUNCTION__, phy_id);
395 asd_turn_led(asd_ha, phy_id, 0);
396 /* the device is gone */
397 sas_phy_disconnected(sas_phy);
398 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
399 break;
400 case REQ_TASK_ABORT:
401 ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
402 phy_id);
403 break;
404 case REQ_DEVICE_RESET:
405 ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
406 phy_id);
407 break;
408 case SIGNAL_NCQ_ERROR:
409 ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
410 phy_id);
411 break;
412 case CLEAR_NCQ_ERROR:
413 ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
414 phy_id);
415 break;
416 default:
417 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
418 phy_id, sb_opcode);
419 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
420 edb, dl->opcode);
421 ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
422 sb_opcode, phy_id);
423 ASD_DPRINTK("escb: vaddr: 0x%p, "
424 "dma_handle: 0x%llx, next: 0x%llx, "
425 "index:%d, opcode:0x%02x\n",
426 ascb->dma_scb.vaddr,
427 (unsigned long long)ascb->dma_scb.dma_handle,
428 (unsigned long long)
429 le64_to_cpu(ascb->scb->header.next_scb),
430 le16_to_cpu(ascb->scb->header.index),
431 ascb->scb->header.opcode);
432
433 break;
434 }
435
436 asd_invalidate_edb(ascb, edb);
437}
438
439int asd_init_post_escbs(struct asd_ha_struct *asd_ha)
440{
441 struct asd_seq_data *seq = &asd_ha->seq;
442 int i;
443
444 for (i = 0; i < seq->num_escbs; i++)
445 seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete;
446
447 ASD_DPRINTK("posting %d escbs\n", i);
448 return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs);
449}
450
451/* ---------- CONTROL PHY ---------- */
452
453#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \
454 | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
455 | CURRENT_OOB_ERROR)
456
457/**
458 * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb
459 * @ascb: pointer to an ascb
460 * @dl: pointer to the done list entry
461 *
462 * This function completes a CONTROL PHY scb and frees the ascb.
463 * A note on LEDs:
464 * - a phy's LED blinks if there is I/O through it,
465 * - if a device is connected to the phy, its LED is lit,
466 * - if no device is connected to the phy, its LED is dimmed (off).
467 */
468static void control_phy_tasklet_complete(struct asd_ascb *ascb,
469 struct done_list_struct *dl)
470{
471 struct asd_ha_struct *asd_ha = ascb->ha;
472 struct scb *scb = ascb->scb;
473 struct control_phy *control_phy = &scb->control_phy;
474 u8 phy_id = control_phy->phy_id;
475 struct asd_phy *phy = &ascb->ha->phys[phy_id];
476
477 u8 status = dl->status_block[0];
478 u8 oob_status = dl->status_block[1];
479 u8 oob_mode = dl->status_block[2];
480 /* u8 oob_signals= dl->status_block[3]; */
481
482 if (status != 0) {
483 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
484 __FUNCTION__, phy_id, status);
485 goto out;
486 }
487
488 switch (control_phy->sub_func) {
489 case DISABLE_PHY:
490 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
491 asd_turn_led(asd_ha, phy_id, 0);
492 asd_control_led(asd_ha, phy_id, 0);
493 ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
494 break;
495
496 case ENABLE_PHY:
497 asd_control_led(asd_ha, phy_id, 1);
498 if (oob_status & CURRENT_OOB_DONE) {
499 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
500 get_lrate_mode(phy, oob_mode);
501 asd_turn_led(asd_ha, phy_id, 1);
502 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
503				    __FUNCTION__, phy_id, phy->sas_phy.linkrate,
504 phy->sas_phy.iproto);
505 } else if (oob_status & CURRENT_SPINUP_HOLD) {
506 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
507 asd_turn_led(asd_ha, phy_id, 1);
508 ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
509 phy_id);
510 } else if (oob_status & CURRENT_ERR_MASK) {
511 asd_turn_led(asd_ha, phy_id, 0);
512 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
513 __FUNCTION__, phy_id, oob_status);
514 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT
515 | CURRENT_DEVICE_PRESENT)) {
516 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
517 asd_turn_led(asd_ha, phy_id, 1);
518 ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
519 __FUNCTION__, phy_id);
520 } else {
521 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
522 asd_turn_led(asd_ha, phy_id, 0);
523 ASD_DPRINTK("%s: phy%d: no device present: "
524 "oob_status:0x%x\n",
525 __FUNCTION__, phy_id, oob_status);
526 }
527 break;
528 case RELEASE_SPINUP_HOLD:
529 case PHY_NO_OP:
530 case EXECUTE_HARD_RESET:
531 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
532 phy_id, control_phy->sub_func);
533 /* XXX finish */
534 break;
535 default:
536 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
537 phy_id, control_phy->sub_func);
538 break;
539 }
540out:
541 asd_ascb_free(ascb);
542}
543
544static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
545{
546 /* disable all speeds, then enable defaults */
547 *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS
548 | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS;
549
550 switch (pd->max_sas_lrate) {
551 case SAS_LINK_RATE_6_0_GBPS:
552 *speed_mask &= ~SAS_SPEED_60_DIS;
553 default:
554 case SAS_LINK_RATE_3_0_GBPS:
555 *speed_mask &= ~SAS_SPEED_30_DIS;
556 case SAS_LINK_RATE_1_5_GBPS:
557 *speed_mask &= ~SAS_SPEED_15_DIS;
558 }
559
560 switch (pd->min_sas_lrate) {
561 case SAS_LINK_RATE_6_0_GBPS:
562 *speed_mask |= SAS_SPEED_30_DIS;
563 case SAS_LINK_RATE_3_0_GBPS:
564 *speed_mask |= SAS_SPEED_15_DIS;
565 default:
566 case SAS_LINK_RATE_1_5_GBPS:
567 /* nothing to do */
568 ;
569 }
570
571 switch (pd->max_sata_lrate) {
572 case SAS_LINK_RATE_3_0_GBPS:
573 *speed_mask &= ~SATA_SPEED_30_DIS;
574 default:
575 case SAS_LINK_RATE_1_5_GBPS:
576 *speed_mask &= ~SATA_SPEED_15_DIS;
577 }
578
579 switch (pd->min_sata_lrate) {
580 case SAS_LINK_RATE_3_0_GBPS:
581 *speed_mask |= SATA_SPEED_15_DIS;
582 default:
583 case SAS_LINK_RATE_1_5_GBPS:
584 /* nothing to do */
585 ;
586 }
587}
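
/*
 * The switch cascades in set_speed_mask() rely on deliberate
 * fallthrough.  Worked example (illustrative values): with
 * max_sas_lrate = 3.0 Gbps the first switch clears SAS_SPEED_30_DIS and
 * falls through to clear SAS_SPEED_15_DIS, leaving SAS_SPEED_60_DIS
 * set; a min_sas_lrate of 3.0 Gbps then sets SAS_SPEED_15_DIS again,
 * so only the 3.0 Gbps rate remains enabled.
 */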
588
589/**
590 * asd_build_control_phy -- build a CONTROL PHY SCB
591 * @ascb: pointer to an ascb
592 * @phy_id: phy id to control, integer
593 * @subfunc: subfunction, what to actually do to the phy
594 *
595 * This function builds a CONTROL PHY scb. No allocation of any kind
596 * is performed. @ascb is allocated with the list function.
597 * The caller can override ascb->tasklet_complete to point
598 * to its own callback function. That callback must itself call
599 * asd_ascb_free() when it is done with the ascb.
600 * See the default implementation and the usage sketch below.
601 */
602void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
603{
604 struct asd_phy *phy = &ascb->ha->phys[phy_id];
605 struct scb *scb = ascb->scb;
606 struct control_phy *control_phy = &scb->control_phy;
607
608 scb->header.opcode = CONTROL_PHY;
609 control_phy->phy_id = (u8) phy_id;
610 control_phy->sub_func = subfunc;
611
612 switch (subfunc) {
613 case EXECUTE_HARD_RESET: /* 0x81 */
614 case ENABLE_PHY: /* 0x01 */
615 /* decide hot plug delay */
616 control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT;
617
618 /* decide speed mask */
619 set_speed_mask(&control_phy->speed_mask, phy->phy_desc);
620
621 /* initiator port settings are in the hi nibble */
622 if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
623 control_phy->port_type = SAS_PROTO_ALL << 4;
624 else if (phy->sas_phy.role == PHY_ROLE_TARGET)
625 control_phy->port_type = SAS_PROTO_ALL;
626 else
627 control_phy->port_type =
628 (SAS_PROTO_ALL << 4) | SAS_PROTO_ALL;
629
630 /* link reset retries, this should be nominal */
631 control_phy->link_reset_retries = 10;
632
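		/* fall through: the func_mask below is also set up for
		 * ENABLE_PHY and EXECUTE_HARD_RESET */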
633 case RELEASE_SPINUP_HOLD: /* 0x02 */
634 /* decide the func_mask */
635 control_phy->func_mask = FUNCTION_MASK_DEFAULT;
636 if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD)
637 control_phy->func_mask &= ~SPINUP_HOLD_DIS;
638 else
639 control_phy->func_mask |= SPINUP_HOLD_DIS;
640 }
641
642 control_phy->conn_handle = cpu_to_le16(0xFFFF);
643
644 ascb->tasklet_complete = control_phy_tasklet_complete;
645}
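
/*
 * Usage sketch (illustrative only, hence #if 0): a caller that builds a
 * CONTROL PHY SCB and overrides ->tasklet_complete.  my_control_phy_done()
 * and my_enable_phy() are hypothetical names, not part of this driver;
 * as required above, the override frees the ascb itself.
 */
#if 0
static void my_control_phy_done(struct asd_ascb *ascb,
				struct done_list_struct *dl)
{
	ASD_DPRINTK("phy%d: control phy done, opcode:0x%x\n",
		    ascb->scb->control_phy.phy_id, dl->opcode);
	asd_ascb_free(ascb);		/* mandatory in an override */
}

static int my_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
{
	int num = 1;
	struct asd_ascb *ascb = asd_ascb_alloc_list(asd_ha, &num, GFP_KERNEL);

	if (!ascb)
		return -ENOMEM;
	asd_build_control_phy(ascb, phy_id, ENABLE_PHY);
	ascb->tasklet_complete = my_control_phy_done;	/* override default */
	if (asd_post_ascb_list(asd_ha, ascb, 1) != 0) {
		asd_ascb_free(ascb);
		return -EIO;
	}
	return 0;
}
#endif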
646
647/* ---------- INITIATE LINK ADM TASK ---------- */
648
649static void link_adm_tasklet_complete(struct asd_ascb *ascb,
650 struct done_list_struct *dl)
651{
652 u8 opcode = dl->opcode;
653 struct initiate_link_adm *link_adm = &ascb->scb->link_adm;
654 u8 phy_id = link_adm->phy_id;
655
656 if (opcode != TC_NO_ERROR) {
657 asd_printk("phy%d: link adm task 0x%x completed with error "
658 "0x%x\n", phy_id, link_adm->sub_func, opcode);
659 }
660 ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n",
661 phy_id, link_adm->sub_func, opcode);
662
663 asd_ascb_free(ascb);
664}
665
666void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
667 u8 subfunc)
668{
669 struct scb *scb = ascb->scb;
670 struct initiate_link_adm *link_adm = &scb->link_adm;
671
672 scb->header.opcode = INITIATE_LINK_ADM_TASK;
673
674 link_adm->phy_id = phy_id;
675 link_adm->sub_func = subfunc;
676 link_adm->conn_handle = cpu_to_le16(0xFFFF);
677
678 ascb->tasklet_complete = link_adm_tasklet_complete;
679}
680
681/* ---------- SCB timer ---------- */
682
683/**
684 * asd_ascb_timedout -- called when a pending SCB's timer has expired
685 * @data: unsigned long, a pointer to the ascb in question
686 *
687 * This is the default timeout function, which performs the essential cleanup.
688 * Upper layers can implement their own timeout function, say to free
689 * resources they have with this SCB, and then call this one at the
690 * end of their timeout function. To do this, one should initialize
691 * the ascb->timer.{function, data, expires} prior to calling the post
692 * function. The timer is started by the post function.
693 */
694void asd_ascb_timedout(unsigned long data)
695{
696 struct asd_ascb *ascb = (void *) data;
697 struct asd_seq_data *seq = &ascb->ha->seq;
698 unsigned long flags;
699
700 ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode);
701
702 spin_lock_irqsave(&seq->pend_q_lock, flags);
703 seq->pending--;
704 list_del_init(&ascb->list);
705 spin_unlock_irqrestore(&seq->pend_q_lock, flags);
706
707 asd_ascb_free(ascb);
708}
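
/*
 * Illustrative sketch (hypothetical, hence #if 0): an upper layer that
 * installs its own timeout handler and chains to the default one.
 * my_scb_timedout() is not part of this driver.  The timer fields are
 * set up before the ascb is posted; the post function starts the timer.
 */
#if 0
static void my_scb_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;

	/* release any resources the upper layer holds for this SCB,
	 * then let the default handler dequeue and free the ascb */
	asd_ascb_timedout((unsigned long) ascb);
}

	/* ...before posting the ascb: */
	ascb->timer.function = my_scb_timedout;
	ascb->timer.data = (unsigned long) ascb;
	ascb->timer.expires = jiffies + 10*HZ;
#endif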
709
710/* ---------- CONTROL PHY ---------- */
711
712/* Given the spec value, return a driver value. */
713static const int phy_func_table[] = {
714 [PHY_FUNC_NOP] = PHY_NO_OP,
715 [PHY_FUNC_LINK_RESET] = ENABLE_PHY,
716 [PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET,
717 [PHY_FUNC_DISABLE] = DISABLE_PHY,
718 [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD,
719};
720
721int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
722{
723 struct asd_ha_struct *asd_ha = phy->ha->lldd_ha;
724 struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc;
725 struct asd_ascb *ascb;
726 struct sas_phy_linkrates *rates;
727 int res = 1;
728
729 switch (func) {
730 case PHY_FUNC_CLEAR_ERROR_LOG:
731 return -ENOSYS;
732 case PHY_FUNC_SET_LINK_RATE:
733 rates = arg;
734 if (rates->minimum_linkrate) {
735 pd->min_sas_lrate = rates->minimum_linkrate;
736 pd->min_sata_lrate = rates->minimum_linkrate;
737 }
738 if (rates->maximum_linkrate) {
739 pd->max_sas_lrate = rates->maximum_linkrate;
740 pd->max_sata_lrate = rates->maximum_linkrate;
741 }
742 func = PHY_FUNC_LINK_RESET;
743 break;
744 default:
745 break;
746 }
747
748 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
749 if (!ascb)
750 return -ENOMEM;
751
752 asd_build_control_phy(ascb, phy->id, phy_func_table[func]);
753	res = asd_post_ascb_list(asd_ha, ascb, 1);
754 if (res)
755 asd_ascb_free(ascb);
756
757 return res;
758}
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
new file mode 100644
index 000000000000..83574b5b4e69
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -0,0 +1,1089 @@
1/*
2 * Aic94xx SAS/SATA driver access to shared data structures and memory
3 * maps.
4 *
5 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This file is part of the aic94xx driver.
11 *
12 * The aic94xx driver is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; version 2 of the
15 * License.
16 *
17 * The aic94xx driver is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with the aic94xx driver; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 *
26 */
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30
31#include "aic94xx.h"
32#include "aic94xx_reg.h"
33
34/* ---------- OCM stuff ---------- */
35
36struct asd_ocm_dir_ent {
37 u8 type;
38 u8 offs[3];
39 u8 _r1;
40 u8 size[3];
41} __attribute__ ((packed));
42
43struct asd_ocm_dir {
44 char sig[2];
45 u8 _r1[2];
46 u8 major; /* 0 */
47 u8 minor; /* 0 */
48 u8 _r2;
49 u8 num_de;
50 struct asd_ocm_dir_ent entry[15];
51} __attribute__ ((packed));
52
53#define OCM_DE_OCM_DIR 0x00
54#define OCM_DE_WIN_DRVR 0x01
55#define OCM_DE_BIOS_CHIM 0x02
56#define OCM_DE_RAID_ENGN 0x03
57#define OCM_DE_BIOS_INTL 0x04
58#define OCM_DE_BIOS_CHIM_OSM 0x05
59#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06
60#define OCM_DE_ADDC2C_RES0 0x07
61#define OCM_DE_ADDC2C_RES1 0x08
62#define OCM_DE_ADDC2C_RES2 0x09
63#define OCM_DE_ADDC2C_RES3 0x0A
64
65#define OCM_INIT_DIR_ENTRIES 5
66/***************************************************************************
67* OCM directory default
68***************************************************************************/
69static struct asd_ocm_dir OCMDirInit =
70{
71 .sig = {0x4D, 0x4F}, /* signature */
72 .num_de = OCM_INIT_DIR_ENTRIES, /* no. of directory entries */
73};
74
75/***************************************************************************
76* OCM directory entries default
77***************************************************************************/
78static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] =
79{
80 {
81 .type = (OCM_DE_ADDC2C_RES0), /* Entry type */
82 .offs = {128}, /* Offset */
83 .size = {0, 4}, /* size */
84 },
85 {
86 .type = (OCM_DE_ADDC2C_RES1), /* Entry type */
87 .offs = {128, 4}, /* Offset */
88 .size = {0, 4}, /* size */
89 },
90 {
91 .type = (OCM_DE_ADDC2C_RES2), /* Entry type */
92 .offs = {128, 8}, /* Offset */
93 .size = {0, 4}, /* size */
94 },
95 {
96 .type = (OCM_DE_ADDC2C_RES3), /* Entry type */
97 .offs = {128, 12}, /* Offset */
98 .size = {0, 4}, /* size */
99 },
100 {
101 .type = (OCM_DE_WIN_DRVR), /* Entry type */
102 .offs = {128, 16}, /* Offset */
103 .size = {128, 235, 1}, /* size */
104 },
105};
106
107struct asd_bios_chim_struct {
108 char sig[4];
109 u8 major; /* 1 */
110 u8 minor; /* 0 */
111 u8 bios_major;
112 u8 bios_minor;
113 __le32 bios_build;
114 u8 flags;
115 u8 pci_slot;
116 __le16 ue_num;
117 __le16 ue_size;
118 u8 _r[14];
119 /* The unit element array is right here.
120 */
121} __attribute__ ((packed));
122
123/**
124 * asd_read_ocm_seg - read an on chip memory (OCM) segment
125 * @asd_ha: pointer to the host adapter structure
126 * @buffer: where to write the read data
127 * @offs: offset into OCM where to read from
128 * @size: how many bytes to read
129 *
130 * Return the number of bytes not read. Return 0 on success.
131 */
132static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
133 u32 offs, int size)
134{
135 u8 *p = buffer;
136	if (unlikely(asd_ha->iospace)) {
137		asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
		size = 0;	/* the string read transfers all @size bytes */
138	} else {
139		for ( ; size > 0; size--, offs++, p++)
140			*p = asd_read_ocm_byte(asd_ha, offs);
141	}
142	return size;
143}
144
145static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha,
146 struct asd_ocm_dir *dir, u32 offs)
147{
148 int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir));
149 if (err) {
150 ASD_DPRINTK("couldn't read ocm segment\n");
151 return err;
152 }
153
154 if (dir->sig[0] != 'M' || dir->sig[1] != 'O') {
155 ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n",
156 dir->sig[0], dir->sig[1]);
157 return -ENOENT;
158 }
159 if (dir->major != 0) {
160 asd_printk("unsupported major version of ocm dir:0x%x\n",
161 dir->major);
162 return -ENOENT;
163 }
164 dir->num_de &= 0xf;
165 return 0;
166}
167
168/**
169 * asd_write_ocm_seg - write an on chip memory (OCM) segment
170 * @asd_ha: pointer to the host adapter structure
171 * @buffer: buffer holding the data to be written
172 * @offs: offset into OCM to write to
173 * @size: how many bytes to write
174 *
175 * Unlike asd_read_ocm_seg(), this function returns nothing.
176 */
177static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
178 u32 offs, int size)
179{
180 u8 *p = buffer;
181 if (unlikely(asd_ha->iospace))
182 asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
183 else {
184 for ( ; size > 0; size--, offs++, p++)
185 asd_write_ocm_byte(asd_ha, offs, *p);
186 }
188}
189
190#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16))
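/* e.g. the OCM directory entry offs {128, 16} above decodes to
 * 128 | (16 << 8) = 0x1080 (4224) */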
191
192static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type,
193 u32 *offs, u32 *size)
194{
195 int i;
196 struct asd_ocm_dir_ent *ent;
197
198 for (i = 0; i < dir->num_de; i++) {
199 if (dir->entry[i].type == type)
200 break;
201 }
202 if (i >= dir->num_de)
203 return -ENOENT;
204 ent = &dir->entry[i];
205 *offs = (u32) THREE_TO_NUM(ent->offs);
206 *size = (u32) THREE_TO_NUM(ent->size);
207 return 0;
208}
209
210#define OCM_BIOS_CHIM_DE 2
211#define BC_BIOS_PRESENT 1
212
213static int asd_get_bios_chim(struct asd_ha_struct *asd_ha,
214 struct asd_ocm_dir *dir)
215{
216 int err;
217 struct asd_bios_chim_struct *bc_struct;
218 u32 offs, size;
219
220 err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size);
221 if (err) {
222 ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n");
223 goto out;
224 }
225 err = -ENOMEM;
226 bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL);
227 if (!bc_struct) {
228 asd_printk("no memory for bios_chim struct\n");
229 goto out;
230 }
231 err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs,
232 sizeof(*bc_struct));
233 if (err) {
234 ASD_DPRINTK("couldn't read ocm segment\n");
235 goto out2;
236 }
237 if (strncmp(bc_struct->sig, "SOIB", 4)
238 && strncmp(bc_struct->sig, "IPSA", 4)) {
239 ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n",
240 bc_struct->sig[0], bc_struct->sig[1],
241 bc_struct->sig[2], bc_struct->sig[3]);
242 err = -ENOENT;
243 goto out2;
244 }
245 if (bc_struct->major != 1) {
246 asd_printk("BIOS_CHIM unsupported major version:0x%x\n",
247 bc_struct->major);
248 err = -ENOENT;
249 goto out2;
250 }
251 if (bc_struct->flags & BC_BIOS_PRESENT) {
252 asd_ha->hw_prof.bios.present = 1;
253 asd_ha->hw_prof.bios.maj = bc_struct->bios_major;
254 asd_ha->hw_prof.bios.min = bc_struct->bios_minor;
255 asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build);
256 ASD_DPRINTK("BIOS present (%d,%d), %d\n",
257 asd_ha->hw_prof.bios.maj,
258 asd_ha->hw_prof.bios.min,
259 asd_ha->hw_prof.bios.bld);
260 }
261 asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num);
262	asd_ha->hw_prof.ue.size = le16_to_cpu(bc_struct->ue_size);
263 ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num,
264 asd_ha->hw_prof.ue.size);
265 size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size;
266 if (size > 0) {
267 err = -ENOMEM;
268 asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL);
269 if (!asd_ha->hw_prof.ue.area)
270 goto out2;
271 err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area,
272 offs + sizeof(*bc_struct), size);
273 if (err) {
274 kfree(asd_ha->hw_prof.ue.area);
275 asd_ha->hw_prof.ue.area = NULL;
276 asd_ha->hw_prof.ue.num = 0;
277 asd_ha->hw_prof.ue.size = 0;
278 ASD_DPRINTK("couldn't read ue entries(%d)\n", err);
279 }
280 }
281out2:
282 kfree(bc_struct);
283out:
284 return err;
285}
286
287static void
288asd_hwi_initialize_ocm_dir(struct asd_ha_struct *asd_ha)
289{
290 int i;
291
292 /* Zero OCM */
293 for (i = 0; i < OCM_MAX_SIZE; i += 4)
294 asd_write_ocm_dword(asd_ha, i, 0);
295
296 /* Write Dir */
297 asd_write_ocm_seg(asd_ha, &OCMDirInit, 0,
298 sizeof(struct asd_ocm_dir));
299
300 /* Write Dir Entries */
301 for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++)
302 asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i],
303 sizeof(struct asd_ocm_dir) +
304				  (i * sizeof(struct asd_ocm_dir_ent)),
305				  sizeof(struct asd_ocm_dir_ent));
306
307}
308
309static int
310asd_hwi_check_ocm_access(struct asd_ha_struct *asd_ha)
311{
312 struct pci_dev *pcidev = asd_ha->pcidev;
313 u32 reg;
314 int err = 0;
315 u32 v;
316
317 /* check if OCM has been initialized by BIOS */
318 reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
319
320 if (!(reg & OCMINITIALIZED)) {
321 err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v);
322 if (err) {
323 asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n",
324 pci_name(pcidev));
325 goto out;
326 }
327
328		printk(KERN_INFO "OCM is not initialized by BIOS, "
329		       "reinitializing it and clearing the current IntrptStatus "
330		       "of 0x%x\n", v);
331
332 if (v)
333 err = pci_write_config_dword(pcidev,
334 PCIC_INTRPT_STAT, v);
335 if (err) {
336 asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n",
337 pci_name(pcidev));
338 goto out;
339 }
340
341 asd_hwi_initialize_ocm_dir(asd_ha);
342
343 }
344out:
345 return err;
346}
347
348/**
349 * asd_read_ocm - read on chip memory (OCM)
350 * @asd_ha: pointer to the host adapter structure
351 */
352int asd_read_ocm(struct asd_ha_struct *asd_ha)
353{
354 int err;
355 struct asd_ocm_dir *dir;
356
357 if (asd_hwi_check_ocm_access(asd_ha))
358 return -1;
359
360 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
361 if (!dir) {
362 asd_printk("no memory for ocm dir\n");
363 return -ENOMEM;
364 }
365
366 err = asd_read_ocm_dir(asd_ha, dir, 0);
367 if (err)
368 goto out;
369
370 err = asd_get_bios_chim(asd_ha, dir);
371out:
372 kfree(dir);
373 return err;
374}
375
376/* ---------- FLASH stuff ---------- */
377
378#define FLASH_RESET 0xF0
379
380#define FLASH_SIZE 0x200000
381#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** "
382#define FLASH_NEXT_ENTRY_OFFS 0x2000
383#define FLASH_MAX_DIR_ENTRIES 32
384
385#define FLASH_DE_TYPE_MASK 0x3FFFFFFF
386#define FLASH_DE_MS 0x120
387#define FLASH_DE_CTRL_A_USER 0xE0
388
389struct asd_flash_de {
390 __le32 type;
391 __le32 offs;
392 __le32 pad_size;
393 __le32 image_size;
394 __le32 chksum;
395 u8 _r[12];
396 u8 version[32];
397} __attribute__ ((packed));
398
399struct asd_flash_dir {
400 u8 cookie[32];
401 __le32 rev; /* 2 */
402 __le32 chksum;
403 __le32 chksum_antidote;
404 __le32 bld;
405 u8 bld_id[32]; /* build id data */
406 u8 ver_data[32]; /* date and time of build */
407 __le32 ae_mask;
408 __le32 v_mask;
409 __le32 oc_mask;
410 u8 _r[20];
411 struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES];
412} __attribute__ ((packed));
413
414struct asd_manuf_sec {
415 char sig[2]; /* 'S', 'M' */
416 u16 offs_next;
417 u8 maj; /* 0 */
418 u8 min; /* 0 */
419 u16 chksum;
420 u16 size;
421 u8 _r[6];
422 u8 sas_addr[SAS_ADDR_SIZE];
423 u8 pcba_sn[ASD_PCBA_SN_SIZE];
424 /* Here start the other segments */
425 u8 linked_list[0];
426} __attribute__ ((packed));
427
428struct asd_manuf_phy_desc {
429 u8 state; /* low 4 bits */
430#define MS_PHY_STATE_ENABLEABLE 0
431#define MS_PHY_STATE_REPORTED 1
432#define MS_PHY_STATE_HIDDEN 2
433 u8 phy_id;
434 u16 _r;
435 u8 phy_control_0; /* mode 5 reg 0x160 */
436 u8 phy_control_1; /* mode 5 reg 0x161 */
437 u8 phy_control_2; /* mode 5 reg 0x162 */
438 u8 phy_control_3; /* mode 5 reg 0x163 */
439} __attribute__ ((packed));
440
441struct asd_manuf_phy_param {
442 char sig[2]; /* 'P', 'M' */
443 u16 next;
444 u8 maj; /* 0 */
445 u8 min; /* 2 */
446 u8 num_phy_desc; /* 8 */
447 u8 phy_desc_size; /* 8 */
448 u8 _r[3];
449 u8 usage_model_id;
450 u32 _r2;
451 struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS];
452} __attribute__ ((packed));
453
454#if 0
455static const char *asd_sb_type[] = {
456 "unknown",
457 "SGPIO",
458 [2 ... 0x7F] = "unknown",
459 [0x80] = "ADPT_I2C",
460 [0x81 ... 0xFF] = "VENDOR_UNIQUExx"
461};
462#endif
463
464struct asd_ms_sb_desc {
465 u8 type;
466 u8 node_desc_index;
467 u8 conn_desc_index;
468 u8 _recvd[0];
469} __attribute__ ((packed));
470
471#if 0
472static const char *asd_conn_type[] = {
473 [0 ... 7] = "unknown",
474 "SFF8470",
475 "SFF8482",
476 "SFF8484",
477 [0x80] = "PCIX_DAUGHTER0",
478 [0x81] = "SAS_DAUGHTER0",
479 [0x82 ... 0xFF] = "VENDOR_UNIQUExx"
480};
481
482static const char *asd_conn_location[] = {
483 "unknown",
484 "internal",
485 "external",
486 "board_to_board",
487};
488#endif
489
490struct asd_ms_conn_desc {
491 u8 type;
492 u8 location;
493 u8 num_sideband_desc;
494 u8 size_sideband_desc;
495 u32 _resvd;
496 u8 name[16];
497 struct asd_ms_sb_desc sb_desc[0];
498} __attribute__ ((packed));
499
500struct asd_nd_phy_desc {
501 u8 vp_attch_type;
502 u8 attch_specific[0];
503} __attribute__ ((packed));
504
505#if 0
506static const char *asd_node_type[] = {
507 "IOP",
508 "IO_CONTROLLER",
509 "EXPANDER",
510 "PORT_MULTIPLIER",
511 "PORT_MULTIPLEXER",
512 "MULTI_DROP_I2C_BUS",
513};
514#endif
515
516struct asd_ms_node_desc {
517 u8 type;
518 u8 num_phy_desc;
519 u8 size_phy_desc;
520 u8 _resvd;
521 u8 name[16];
522 struct asd_nd_phy_desc phy_desc[0];
523} __attribute__ ((packed));
524
525struct asd_ms_conn_map {
526 char sig[2]; /* 'M', 'C' */
527 __le16 next;
528 u8 maj; /* 0 */
529 u8 min; /* 0 */
530 __le16 cm_size; /* size of this struct */
531 u8 num_conn;
532 u8 conn_size;
533 u8 num_nodes;
534 u8 usage_model_id;
535 u32 _resvd;
536 struct asd_ms_conn_desc conn_desc[0];
537 struct asd_ms_node_desc node_desc[0];
538} __attribute__ ((packed));
539
540struct asd_ctrla_phy_entry {
541 u8 sas_addr[SAS_ADDR_SIZE];
542 u8 sas_link_rates; /* max in hi bits, min in low bits */
543 u8 flags;
544 u8 sata_link_rates;
545 u8 _r[5];
546} __attribute__ ((packed));
547
548struct asd_ctrla_phy_settings {
549 u8 id0; /* P'h'y */
550 u8 _r;
551 u16 next;
552 u8 num_phys; /* number of PHYs in the PCI function */
553 u8 _r2[3];
554 struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS];
555} __attribute__ ((packed));
556
557struct asd_ll_el {
558 u8 id0;
559 u8 id1;
560 __le16 next;
561 u8 something_here[0];
562} __attribute__ ((packed));
563
564static int asd_poll_flash(struct asd_ha_struct *asd_ha)
565{
566 int c;
567 u8 d;
568
569 for (c = 5000; c > 0; c--) {
570 d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
571 d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
572 if (!d)
573 return 0;
574 udelay(5);
575 }
576 return -ENOENT;
577}
578
579static int asd_reset_flash(struct asd_ha_struct *asd_ha)
580{
581 int err;
582
583 err = asd_poll_flash(asd_ha);
584 if (err)
585 return err;
586 asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET);
587 err = asd_poll_flash(asd_ha);
588
589 return err;
590}
591
592static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha,
593 void *buffer, u32 offs, int size)
594{
595 asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs,
596 size);
597 return 0;
598}
599
600/**
601 * asd_find_flash_dir - finds and reads the flash directory
602 * @asd_ha: pointer to the host adapter structure
603 * @flash_dir: pointer to flash directory structure
604 *
605 * If found, the flash directory segment will be copied to
606 * @flash_dir. Return 1 if found, 0 if not.
607 */
608static int asd_find_flash_dir(struct asd_ha_struct *asd_ha,
609 struct asd_flash_dir *flash_dir)
610{
611 u32 v;
612 for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
613 asd_read_flash_seg(asd_ha, flash_dir, v,
614 sizeof(FLASH_DIR_COOKIE)-1);
615 if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE,
616 sizeof(FLASH_DIR_COOKIE)-1) == 0) {
617 asd_ha->hw_prof.flash.dir_offs = v;
618 asd_read_flash_seg(asd_ha, flash_dir, v,
619 sizeof(*flash_dir));
620 return 1;
621 }
622 }
623 return 0;
624}
625
626static int asd_flash_getid(struct asd_ha_struct *asd_ha)
627{
628 int err = 0;
629 u32 reg;
630
631 reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
632
633 if (!(reg & FLASHEX)) {
634 ASD_DPRINTK("flash doesn't exist\n");
635 return -ENOENT;
636 }
637 if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR,
638 &asd_ha->hw_prof.flash.bar)) {
639 asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n",
640 pci_name(asd_ha->pcidev));
641 return -ENOENT;
642 }
643 asd_ha->hw_prof.flash.present = 1;
644 asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0;
645 err = asd_reset_flash(asd_ha);
646 if (err) {
647 ASD_DPRINTK("couldn't reset flash(%d)\n", err);
648 return err;
649 }
650 return 0;
651}
652
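/*
 * Flash sectors carry their own 16-bit checksum, chosen so that summing
 * the whole sector as u16 words yields 0; asd_validate_ms() below treats
 * a non-zero sum as corruption (it warns but does not fail).
 */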
653static u16 asd_calc_flash_chksum(u16 *p, int size)
654{
655 u16 chksum = 0;
656
657 while (size-- > 0)
658 chksum += *p++;
659
660 return chksum;
661}
662
663
664static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type,
665 u32 *offs, u32 *size)
666{
667 int i;
668 struct asd_flash_de *de;
669
670 for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) {
671 u32 type = le32_to_cpu(flash_dir->dir_entry[i].type);
672
673 type &= FLASH_DE_TYPE_MASK;
674 if (type == entry_type)
675 break;
676 }
677 if (i >= FLASH_MAX_DIR_ENTRIES)
678 return -ENOENT;
679 de = &flash_dir->dir_entry[i];
680 *offs = le32_to_cpu(de->offs);
681 *size = le32_to_cpu(de->pad_size);
682 return 0;
683}
684
685static int asd_validate_ms(struct asd_manuf_sec *ms)
686{
687 if (ms->sig[0] != 'S' || ms->sig[1] != 'M') {
688 ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n",
689 ms->sig[0], ms->sig[1]);
690 return -ENOENT;
691 }
692 if (ms->maj != 0) {
693		asd_printk("unsupported manuf. sector major version:%x\n",
694 ms->maj);
695 return -ENOENT;
696 }
697 ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next);
698 ms->chksum = le16_to_cpu((__force __le16) ms->chksum);
699 ms->size = le16_to_cpu((__force __le16) ms->size);
700
701 if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) {
702 asd_printk("failed manuf sector checksum\n");
703 }
704
705 return 0;
706}
707
708static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha,
709 struct asd_manuf_sec *ms)
710{
711 memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE);
712 return 0;
713}
714
715static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha,
716 struct asd_manuf_sec *ms)
717{
718 memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE);
719 asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0';
720 return 0;
721}
722
723/**
724 * asd_find_ll_by_id - find a linked list entry by its id
725 * @start: void pointer to the first element in the linked list
726 * @id0: the first byte of the id (offs 0)
727 * @id1: the second byte of the id (offs 1)
728 *
729 * @start has to be the _base_ element start, since the
730 * linked list entries' offsets are relative to this pointer.
731 * Some linked list entries use only the first id, in which case
732 * you can pass 0xFF for the second.
733 */
734static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
735{
736 struct asd_ll_el *el = start;
737
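	/* The switch below relies on deliberate fallthrough: when @id1
	 * is 0xFF only the first id byte is compared; otherwise both
	 * bytes must match before the element is returned. */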
738 do {
739 switch (id1) {
740 default:
741 if (el->id1 == id1)
742 case 0xFF:
743 if (el->id0 == id0)
744 return el;
745 }
746 el = start + le16_to_cpu(el->next);
747 } while (el != start);
748
749 return NULL;
750}
751
752/**
753 * asd_ms_get_phy_params - get phy parameters from the manufacturing sector
754 * @asd_ha: pointer to the host adapter structure
755 * @manuf_sec: pointer to the manufacturing sector
756 *
757 * The manufacturing sector also contains the linked list of sub-segments,
758 * since when it was read, its size was taken from the flash directory,
759 * not from the structure size.
760 *
761 * HIDDEN phys do not count in the total count. REPORTED phys cannot
762 * be enabled but are reported and counted towards the total.
763 * ENABLEABLE phys are enabled by default and count towards the total.
764 * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys
765 * merely specifies the number of phys the host adapter decided to
766 * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN,
767 * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLEABLE.
768 * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2
769 * are actually enabled (enabled by default, max number of phys
770 * enableable in this case).
771 */
772static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha,
773 struct asd_manuf_sec *manuf_sec)
774{
775 int i;
776 int en_phys = 0;
777 int rep_phys = 0;
778 struct asd_manuf_phy_param *phy_param;
779 struct asd_manuf_phy_param dflt_phy_param;
780
781 phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M');
782 if (!phy_param) {
783 ASD_DPRINTK("ms: no phy parameters found\n");
784 ASD_DPRINTK("ms: Creating default phy parameters\n");
785 dflt_phy_param.sig[0] = 'P';
786 dflt_phy_param.sig[1] = 'M';
787 dflt_phy_param.maj = 0;
788 dflt_phy_param.min = 2;
789 dflt_phy_param.num_phy_desc = 8;
790 dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc);
791		for (i = 0; i < ASD_MAX_PHYS; i++) {
792 dflt_phy_param.phy_desc[i].state = 0;
793 dflt_phy_param.phy_desc[i].phy_id = i;
794 dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6;
795 dflt_phy_param.phy_desc[i].phy_control_1 = 0x10;
796 dflt_phy_param.phy_desc[i].phy_control_2 = 0x43;
797 dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb;
798 }
799
800 phy_param = &dflt_phy_param;
801
802 }
803
804 if (phy_param->maj != 0) {
805 asd_printk("unsupported manuf. phy param major version:0x%x\n",
806 phy_param->maj);
807 return -ENOENT;
808 }
809
810 ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc);
811 asd_ha->hw_prof.enabled_phys = 0;
812 for (i = 0; i < phy_param->num_phy_desc; i++) {
813 struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i];
814 switch (pd->state & 0xF) {
815 case MS_PHY_STATE_HIDDEN:
816 ASD_DPRINTK("ms: phy%d: HIDDEN\n", i);
817 continue;
818 case MS_PHY_STATE_REPORTED:
819 ASD_DPRINTK("ms: phy%d: REPORTED\n", i);
820 asd_ha->hw_prof.enabled_phys &= ~(1 << i);
821 rep_phys++;
822 continue;
823 case MS_PHY_STATE_ENABLEABLE:
824			ASD_DPRINTK("ms: phy%d: ENABLEABLE\n", i);
825 asd_ha->hw_prof.enabled_phys |= (1 << i);
826 en_phys++;
827 break;
828 }
829 asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0;
830 asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1;
831 asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2;
832 asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3;
833 }
834 asd_ha->hw_prof.max_phys = rep_phys + en_phys;
835 asd_ha->hw_prof.num_phys = en_phys;
836 ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n",
837 asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys);
838 ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys);
839 return 0;
840}
841
842static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha,
843 struct asd_manuf_sec *manuf_sec)
844{
845 struct asd_ms_conn_map *cm;
846
847 cm = asd_find_ll_by_id(manuf_sec, 'M', 'C');
848 if (!cm) {
849 ASD_DPRINTK("ms: no connector map found\n");
850 return 0;
851 }
852
853 if (cm->maj != 0) {
854 ASD_DPRINTK("ms: unsupported: connector map major version 0x%x"
855 "\n", cm->maj);
856 return -ENOENT;
857 }
858
859 /* XXX */
860
861 return 0;
862}
863
864
865/**
866 * asd_process_ms - find and extract information from the manufacturing sector
867 * @asd_ha: pointer to the host adapter structure
868 * @flash_dir: pointer to the flash directory
869 */
870static int asd_process_ms(struct asd_ha_struct *asd_ha,
871 struct asd_flash_dir *flash_dir)
872{
873 int err;
874 struct asd_manuf_sec *manuf_sec;
875 u32 offs, size;
876
877 err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size);
878 if (err) {
879 ASD_DPRINTK("Couldn't find the manuf. sector\n");
880 goto out;
881 }
882
883 if (size == 0)
884 goto out;
885
886 err = -ENOMEM;
887 manuf_sec = kmalloc(size, GFP_KERNEL);
888 if (!manuf_sec) {
889 ASD_DPRINTK("no mem for manuf sector\n");
890 goto out;
891 }
892
893 err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size);
894 if (err) {
895 ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n",
896 offs, size);
897 goto out2;
898 }
899
900 err = asd_validate_ms(manuf_sec);
901 if (err) {
902 ASD_DPRINTK("couldn't validate manuf sector\n");
903 goto out2;
904 }
905
906 err = asd_ms_get_sas_addr(asd_ha, manuf_sec);
907 if (err) {
908 ASD_DPRINTK("couldn't read the SAS_ADDR\n");
909 goto out2;
910 }
911 ASD_DPRINTK("manuf sect SAS_ADDR %llx\n",
912 SAS_ADDR(asd_ha->hw_prof.sas_addr));
913
914 err = asd_ms_get_pcba_sn(asd_ha, manuf_sec);
915 if (err) {
916 ASD_DPRINTK("couldn't read the PCBA SN\n");
917 goto out2;
918 }
919 ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn);
920
921 err = asd_ms_get_phy_params(asd_ha, manuf_sec);
922 if (err) {
923 ASD_DPRINTK("ms: couldn't get phy parameters\n");
924 goto out2;
925 }
926
927 err = asd_ms_get_connector_map(asd_ha, manuf_sec);
928 if (err) {
929 ASD_DPRINTK("ms: couldn't get connector map\n");
930 goto out2;
931 }
932
933out2:
934 kfree(manuf_sec);
935out:
936 return err;
937}
938
939static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha,
940 struct asd_ctrla_phy_settings *ps)
941{
942 int i;
943 for (i = 0; i < ps->num_phys; i++) {
944 struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i];
945
946 if (!PHY_ENABLED(asd_ha, i))
947 continue;
948 if (*(u64 *)pe->sas_addr == 0) {
949 asd_ha->hw_prof.enabled_phys &= ~(1 << i);
950 continue;
951 }
952 /* This is the SAS address which should be sent in IDENTIFY. */
953 memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr,
954 SAS_ADDR_SIZE);
955 asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
956 (pe->sas_link_rates & 0xF0) >> 4;
957 asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
958 (pe->sas_link_rates & 0x0F);
959 asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
960 (pe->sata_link_rates & 0xF0) >> 4;
961 asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
962 (pe->sata_link_rates & 0x0F);
963 asd_ha->hw_prof.phy_desc[i].flags = pe->flags;
964 ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x,"
965 " sata rate:0x%x-0x%x, flags:0x%x\n",
966 i,
967 SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr),
968 asd_ha->hw_prof.phy_desc[i].max_sas_lrate,
969 asd_ha->hw_prof.phy_desc[i].min_sas_lrate,
970 asd_ha->hw_prof.phy_desc[i].max_sata_lrate,
971 asd_ha->hw_prof.phy_desc[i].min_sata_lrate,
972 asd_ha->hw_prof.phy_desc[i].flags);
973 }
974
975 return 0;
976}
977
978/**
979 * asd_process_ctrl_a_user - process CTRL-A user settings
980 * @asd_ha: pointer to the host adapter structure
981 * @flash_dir: pointer to the flash directory
982 */
983static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
984 struct asd_flash_dir *flash_dir)
985{
986 int err, i;
987 u32 offs, size;
988 struct asd_ll_el *el;
989 struct asd_ctrla_phy_settings *ps;
990 struct asd_ctrla_phy_settings dflt_ps;
991
992 err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size);
993	if (err) {
994		ASD_DPRINTK("couldn't find CTRL-A user settings section\n");
995		ASD_DPRINTK("using default CTRL-A user settings\n");
996
997		dflt_ps.id0 = 'h';
998		dflt_ps.num_phys = 8;
999		for (i = 0; i < ASD_MAX_PHYS; i++) {
1000			memcpy(dflt_ps.phy_ent[i].sas_addr,
1001			       asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
1002			dflt_ps.phy_ent[i].sas_link_rates = 0x98;
1003			dflt_ps.phy_ent[i].flags = 0x0;
1004			dflt_ps.phy_ent[i].sata_link_rates = 0x0;
1005		}
1006
1007		/* nothing to read from flash in this case: process the
		 * defaults directly, since falling through would read from
		 * an uninitialized flash offset and overwrite them */
1008		return asd_process_ctrla_phy_settings(asd_ha, &dflt_ps);
1009	}
1010
1011 if (size == 0)
1012 goto out;
1013
1014 err = -ENOMEM;
1015 el = kmalloc(size, GFP_KERNEL);
1016 if (!el) {
1017 ASD_DPRINTK("no mem for ctrla user settings section\n");
1018 goto out;
1019 }
1020
1021 err = asd_read_flash_seg(asd_ha, (void *)el, offs, size);
1022 if (err) {
1023 ASD_DPRINTK("couldn't read ctrla phy settings section\n");
1024 goto out2;
1025 }
1026
1027 err = -ENOENT;
1028 ps = asd_find_ll_by_id(el, 'h', 0xFF);
1029 if (!ps) {
1030 ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
1031 goto out2;
1032 }
1033
1034 err = asd_process_ctrla_phy_settings(asd_ha, ps);
1035 if (err) {
1036 ASD_DPRINTK("couldn't process ctrla phy settings\n");
1037 goto out2;
1038 }
1039out2:
1040 kfree(el);
1041out:
1042 return err;
1043}
1044
1045/**
1046 * asd_read_flash - read flash memory
1047 * @asd_ha: pointer to the host adapter structure
1048 */
1049int asd_read_flash(struct asd_ha_struct *asd_ha)
1050{
1051 int err;
1052 struct asd_flash_dir *flash_dir;
1053
1054 err = asd_flash_getid(asd_ha);
1055 if (err)
1056 return err;
1057
1058 flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL);
1059 if (!flash_dir)
1060 return -ENOMEM;
1061
1062 err = -ENOENT;
1063 if (!asd_find_flash_dir(asd_ha, flash_dir)) {
1064 ASD_DPRINTK("couldn't find flash directory\n");
1065 goto out;
1066 }
1067
1068 if (le32_to_cpu(flash_dir->rev) != 2) {
1069 asd_printk("unsupported flash dir version:0x%x\n",
1070 le32_to_cpu(flash_dir->rev));
1071 goto out;
1072 }
1073
1074 err = asd_process_ms(asd_ha, flash_dir);
1075 if (err) {
1076 ASD_DPRINTK("couldn't process manuf sector settings\n");
1077 goto out;
1078 }
1079
1080 err = asd_process_ctrl_a_user(asd_ha, flash_dir);
1081 if (err) {
1082 ASD_DPRINTK("couldn't process CTRL-A user settings\n");
1083 goto out;
1084 }
1085
1086out:
1087 kfree(flash_dir);
1088 return err;
1089}
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
new file mode 100644
index 000000000000..56e4b3ba6a08
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -0,0 +1,1404 @@
1/*
2 * Aic94xx SAS/SATA driver sequencer interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * Parts of this code adapted from David Chaw's adp94xx_seq.c.
8 *
9 * This file is licensed under GPLv2.
10 *
11 * This file is part of the aic94xx driver.
12 *
13 * The aic94xx driver is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; version 2 of the
16 * License.
17 *
18 * The aic94xx driver is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with the aic94xx driver; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 *
27 */
28
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/module.h>
32#include <linux/firmware.h>
33#include "aic94xx_reg.h"
34#include "aic94xx_hwi.h"
35
36#include "aic94xx_seq.h"
37#include "aic94xx_dump.h"
38
39/* It takes no more than 0.05 us for an instruction
40 * to complete. So waiting for 1 us should be more than
41 * enough.
42 */
43#define PAUSE_DELAY 1
44#define PAUSE_TRIES 1000
45
46static const struct firmware *sequencer_fw;
47static const char *sequencer_version;
48static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
49 cseq_idle_loop, lseq_idle_loop;
50static u8 *cseq_code, *lseq_code;
51static u32 cseq_code_size, lseq_code_size;
52
53static u16 first_scb_site_no = 0xFFFF;
54static u16 last_scb_site_no;
55
56/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
57
58/**
59 * asd_pause_cseq - pause the central sequencer
60 * @asd_ha: pointer to host adapter structure
61 *
62 * Return 0 on success, negative on failure.
63 */
64int asd_pause_cseq(struct asd_ha_struct *asd_ha)
65{
66 int count = PAUSE_TRIES;
67 u32 arp2ctl;
68
69 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
70 if (arp2ctl & PAUSED)
71 return 0;
72
73 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
74 do {
75 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
76 if (arp2ctl & PAUSED)
77 return 0;
78 udelay(PAUSE_DELAY);
79 } while (--count > 0);
80
81 ASD_DPRINTK("couldn't pause CSEQ\n");
82 return -1;
83}
84
85/**
86 * asd_unpause_cseq - unpause the central sequencer.
87 * @asd_ha: pointer to host adapter structure.
88 *
89 * Return 0 on success, negative on error.
90 */
91int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
92{
93 u32 arp2ctl;
94 int count = PAUSE_TRIES;
95
96 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
97 if (!(arp2ctl & PAUSED))
98 return 0;
99
100 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
101 do {
102 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
103 if (!(arp2ctl & PAUSED))
104 return 0;
105 udelay(PAUSE_DELAY);
106 } while (--count > 0);
107
108 ASD_DPRINTK("couldn't unpause the CSEQ\n");
109 return -1;
110}
111
112/**
113 * asd_seq_pause_lseq - pause a link sequencer
114 * @asd_ha: pointer to a host adapter structure
115 * @lseq: link sequencer of interest
116 *
117 * Return 0 on success, negative on error.
118 */
119static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
120{
121 u32 arp2ctl;
122 int count = PAUSE_TRIES;
123
124 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
125 if (arp2ctl & PAUSED)
126 return 0;
127
128 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
129 do {
130 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
131 if (arp2ctl & PAUSED)
132 return 0;
133 udelay(PAUSE_DELAY);
134 } while (--count > 0);
135
136 ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
137 return -1;
138}
139
140/**
141 * asd_pause_lseq - pause the link sequencer(s)
142 * @asd_ha: pointer to host adapter structure
143 * @lseq_mask: mask of link sequencers of interest
144 *
145 * Return 0 on success, negative on failure.
146 */
147int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
148{
149 int lseq;
150 int err = 0;
151
152 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
153 err = asd_seq_pause_lseq(asd_ha, lseq);
154 if (err)
155 return err;
156 }
157
158 return err;
159}
160
161/**
162 * asd_seq_unpause_lseq - unpause a link sequencer
163 * @asd_ha: pointer to host adapter structure
164 * @lseq: link sequencer of interest
165 *
166 * Return 0 on success, negative on error.
167 */
168static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
169{
170 u32 arp2ctl;
171 int count = PAUSE_TRIES;
172
173 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
174 if (!(arp2ctl & PAUSED))
175 return 0;
176
177 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
178 do {
179 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
180 if (!(arp2ctl & PAUSED))
181 return 0;
182 udelay(PAUSE_DELAY);
183 } while (--count > 0);
184
185 ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
186	return -1;
187}
188
189
190/**
191 * asd_unpause_lseq - unpause the link sequencer(s)
192 * @asd_ha: pointer to host adapter structure
193 * @lseq_mask: mask of link sequencers of interest
194 *
195 * Return 0 on success, negative on failure.
196 */
197int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
198{
199 int lseq;
200 int err = 0;
201
202 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
203 err = asd_seq_unpause_lseq(asd_ha, lseq);
204 if (err)
205 return err;
206 }
207
208 return err;
209}
210
211/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
212
213static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
214 u32 size)
215{
216 u32 addr = CSEQ_RAM_REG_BASE_ADR;
217 const u32 *prog = (u32 *) _prog;
218 u32 i;
219
220 for (i = 0; i < size; i += 4, prog++, addr += 4) {
221 u32 val = asd_read_reg_dword(asd_ha, addr);
222
223 if (le32_to_cpu(*prog) != val) {
224 asd_printk("%s: cseq verify failed at %u "
225 "read:0x%x, wanted:0x%x\n",
226 pci_name(asd_ha->pcidev),
227 i, val, le32_to_cpu(*prog));
228 return -1;
229 }
230 }
231 ASD_DPRINTK("verified %d bytes, passed\n", size);
232 return 0;
233}
234
235/**
236 * asd_verify_lseq - verify the microcode of a link sequencer
237 * @asd_ha: pointer to host adapter structure
238 * @_prog: pointer to the microcode
239 * @size: size of the microcode in bytes
240 * @lseq: link sequencer of interest
241 *
242 * The link sequencer code is accessed in 4 KB pages, which are selected
243 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
244 * The 10 KB LSEQm instruction code is mapped, page at a time, at
245 * LmSEQRAM address.
246 */
247static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
248 u32 size, int lseq)
249{
250#define LSEQ_CODEPAGE_SIZE 4096
251 int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
252 u32 page;
253 const u32 *prog = (u32 *) _prog;
254
255 for (page = 0; page < pages; page++) {
256 u32 i;
257
258 asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
259 page << LmRAMPAGE_LSHIFT);
260 for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
261		     i += 4, prog++, size -= 4) {
262
263 u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
264
265 if (le32_to_cpu(*prog) != val) {
266 asd_printk("%s: LSEQ%d verify failed "
267 "page:%d, offs:%d\n",
268 pci_name(asd_ha->pcidev),
269 lseq, page, i);
270 return -1;
271 }
272 }
273 }
274 ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
275 (int)((u8 *)prog-_prog));
276 return 0;
277}
278
279/**
280 * asd_verify_seq -- verify CSEQ/LSEQ microcode
281 * @asd_ha: pointer to host adapter structure
282 * @prog: pointer to microcode
283 * @size: size of the microcode
284 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
285 *
286 * Return 0 if microcode is correct, negative on mismatch.
287 */
288static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
289 u32 size, u8 lseq_mask)
290{
291 if (lseq_mask == 0)
292 return asd_verify_cseq(asd_ha, prog, size);
293 else {
294 int lseq, err;
295
296 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
297 err = asd_verify_lseq(asd_ha, prog, size, lseq);
298 if (err)
299 return err;
300 }
301 }
302
303 return 0;
304}
305#define ASD_DMA_MODE_DOWNLOAD
306#ifdef ASD_DMA_MODE_DOWNLOAD
307/* Max bytes per overlay DMA: the CSEQ mapped instruction page,
 * (1U << 14) - 1 = 16383 bytes */
308#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
309static int asd_download_seq(struct asd_ha_struct *asd_ha,
310 const u8 * const prog, u32 size, u8 lseq_mask)
311{
312 u32 comstaten;
313 u32 reg;
314 int page;
315 const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
316 struct asd_dma_tok *token;
317 int err = 0;
318
319 if (size % 4) {
320 asd_printk("sequencer program not multiple of 4\n");
321 return -1;
322 }
323
324 asd_pause_cseq(asd_ha);
325 asd_pause_lseq(asd_ha, 0xFF);
326
327 /* save, disable and clear interrupts */
328 comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
329 asd_write_reg_dword(asd_ha, COMSTATEN, 0);
330 asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);
331
332 asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
333 asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);
334
335 token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
336 if (!token) {
337 asd_printk("out of memory for dma SEQ download\n");
338 err = -ENOMEM;
339 goto out;
340 }
341 ASD_DPRINTK("dma-ing %d bytes\n", size);
342
343 for (page = 0; page < pages; page++) {
344 int i;
345 u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
346 (u32)MAX_DMA_OVLY_COUNT);
347
348 memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
349 asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
350 asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
351 reg = !page ? RESETOVLYDMA : 0;
352 reg |= (STARTOVLYDMA | OVLYHALTERR);
353 reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
354 /* Start DMA. */
355 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
356
357 for (i = PAUSE_TRIES*100; i > 0; i--) {
358 u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
359 if (!(dmadone & OVLYDMAACT))
360 break;
361 udelay(PAUSE_DELAY);
362 }
363 }
364
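	/* Note: the polling loop above only waits for OVLYDMAACT to
	 * clear; success of the whole download is judged once, below,
	 * from COMSTAT and CHIMINT. */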
365 reg = asd_read_reg_dword(asd_ha, COMSTAT);
366 if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
367 || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
368 asd_printk("%s: error DMA-ing sequencer code\n",
369 pci_name(asd_ha->pcidev));
370 err = -ENODEV;
371 }
372
373 asd_free_coherent(asd_ha, token);
374 out:
375 asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);
376
377 return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
378}
379#else /* ASD_DMA_MODE_DOWNLOAD */
380static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
381 u32 size, u8 lseq_mask)
382{
383 int i;
384 u32 reg = 0;
385 const u32 *prog = (u32 *) _prog;
386
387 if (size % 4) {
388 asd_printk("sequencer program not multiple of 4\n");
389 return -1;
390 }
391
392 asd_pause_cseq(asd_ha);
393 asd_pause_lseq(asd_ha, 0xFF);
394
395 reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
396 reg |= PIOCMODE;
397
398 asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
399 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
400
401 ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
402 lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");
403
404 for (i = 0; i < size; i += 4, prog++)
405 asd_write_reg_dword(asd_ha, SPIODATA, *prog);
406
407 reg = (reg & ~PIOCMODE) | OVLYHALTERR;
408 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
409
410 return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
411}
412#endif /* ASD_DMA_MODE_DOWNLOAD */
413
414/**
415 * asd_seq_download_seqs - download the sequencer microcode
416 * @asd_ha: pointer to host adapter structure
417 *
418 * Download the central and link sequencer microcode.
419 */
420static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
421{
422 int err;
423
424 if (!asd_ha->hw_prof.enabled_phys) {
425 asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
426 return -ENODEV;
427 }
428
429 /* Download the CSEQ */
430 ASD_DPRINTK("downloading CSEQ...\n");
431 err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
432 if (err) {
433 asd_printk("CSEQ download failed:%d\n", err);
434 return err;
435 }
436
437 /* Download the Link Sequencers code. All of the Link Sequencers
438 * microcode can be downloaded at the same time.
439 */
440 ASD_DPRINTK("downloading LSEQs...\n");
441 err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
442 asd_ha->hw_prof.enabled_phys);
443 if (err) {
444 /* Try it one at a time */
445 u8 lseq;
446 u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
447
448 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
449 err = asd_download_seq(asd_ha, lseq_code,
450 lseq_code_size, 1<<lseq);
451 if (err)
452 break;
453 }
454 }
455 if (err)
456 asd_printk("LSEQs download failed:%d\n", err);
457
458 return err;
459}
460
461/* ---------- Initializing the chip, chip memory, etc. ---------- */
462
463/**
464 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
465 * @asd_ha: pointer to host adapter structure
466 */
467static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
468{
469 /* CSEQ Mode Independent, page 4 setup. */
470 asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
471 asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
472 asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
473 asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
474 asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
475 asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
476 asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
477 asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
478 asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
479 asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
480 asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
481 asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
482 asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
483 asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
484 {
485 u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
486 u8 val = hweight8(con);
487 asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
488 }
489 asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);
490
491 /* CSEQ Mode independent, page 5 setup. */
492 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
493 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
494 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
495 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
496 asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
497 asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
498 asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
499 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
500 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
501 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);
502
503 /* CSEQ Mode independent, page 6 setup. */
504 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
505 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
506 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
507 asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
508 asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
509 asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
510 asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
511 asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
512 asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
513 /* Calculate the free scb mask. */
514 {
515 u16 cmdctx = asd_get_cmdctx_size(asd_ha);
516 cmdctx = (~((cmdctx/128)-1)) >> 8;
517 asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
518 }
519 asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
520 first_scb_site_no);
521 asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
522 last_scb_site_no);
523 asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
524 asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);
525
526 /* CSEQ Mode independent, page 7 setup. */
527 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
528 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
529 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
530 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
531 asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
532 asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
533 asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
534 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
535 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
536 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
537 asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
538 asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
539}
540
541/**
542 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
543 * @asd_ha: pointer to host adapter structure
544 */
545static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
546{
547 int i;
548 int moffs;
549
550 moffs = CSEQ_PAGE_SIZE * 2;
551
552 /* CSEQ Mode dependent, modes 0-7, page 0 setup. */
553 for (i = 0; i < 8; i++) {
554 asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
555 asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
556 asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
557 asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
558 asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
559 }
560
561 /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */
562
563 /* CSEQ Mode dependent, mode 8, page 0 setup. */
564 asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
565 asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
566 asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
567 asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
568 asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
569 asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
570 asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
571 asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
572 asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
573 asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
574 asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
575 asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
576 asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
577 (u16)last_scb_site_no+1);
578 asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
579 (u16)asd_ha->hw_prof.max_ddbs);
580
581 /* CSEQ Mode dependent, mode 8, page 1 setup. */
582 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
583 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
584 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
585 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);
586
587 /* CSEQ Mode dependent, mode 8, page 2 setup. */
588 /* Tell the sequencer the bus address of the first SCB. */
589 asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
590 asd_ha->seq.next_scb.dma_handle);
591 ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
592 (unsigned long long)asd_ha->seq.next_scb.dma_handle);
593
594 /* Tell the sequencer the first Done List entry address. */
595 asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
596 asd_ha->seq.actual_dl->dma_handle);
597
598 /* Initialize the Q_DONE_POINTER with the least significant
599 * 4 bytes of the first Done List address. */
600 asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
601 ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));
602
603 asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);
604
605 /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
606}
607
608/**
609 * asd_init_cseq_scratch -- setup and init CSEQ
610 * @asd_ha: pointer to host adapter structure
611 *
612 * Set up and initialize the central sequencer. Initialize the mode-
613 * independent and mode-dependent scratch pages to the default settings.
614 */
615static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
616{
617 asd_init_cseq_mip(asd_ha);
618 asd_init_cseq_mdp(asd_ha);
619}
620
621/**
622 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
623 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
624 */
625static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
626{
627 int i;
628
629 /* LSEQ Mode independent page 0 setup. */
630 asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
631 asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
632 asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
633 asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
634 ASD_NOTIFY_ENABLE_SPINUP);
635 asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
636 asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
637 asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
638 asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
639 asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
640 asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
641 asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
642 asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
643 asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);
644
645 /* LSEQ Mode independent page 1 setup. */
646 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
647 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
648 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
649 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
650 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
651 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
652 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
653 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
654 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
655 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
656 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
657 asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
658 asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
659 asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);
660
661 /* LSEQ Mode Independent page 2 setup. */
662 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
663 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
664 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
665 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
666 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
667 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
668 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
669 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
670 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
671 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
672 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
673 for (i = 0; i < 12; i += 4)
674 asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);
675
676 /* LSEQ Mode Independent page 3 setup. */
677
678 /* Device present timer timeout */
679 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
680 ASD_DEV_PRESENT_TIMEOUT);
681
682 /* SATA interlock timer disabled */
683 asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
684 ASD_SATA_INTERLOCK_TIMEOUT);
685
686 /* STP shutdown timer timeout constant, IGNORED by the sequencer,
687 * always 0. */
688 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
689 ASD_STP_SHUTDOWN_TIMEOUT);
690
691 asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
692 ASD_SRST_ASSERT_TIMEOUT);
693
694 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
695 ASD_RCV_FIS_TIMEOUT);
696
697 asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
698 ASD_ONE_MILLISEC_TIMEOUT);
699
700 /* COM_INIT timer */
701 asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
702 ASD_TEN_MILLISEC_TIMEOUT);
703
704 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
705 ASD_SMP_RCV_TIMEOUT);
706}
707
708/**
709 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
710 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
711 */
712static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
713{
714 int i;
715 u32 moffs;
716 u16 ret_addr[] = {
717 0xFFFF, /* mode 0 */
718 0xFFFF, /* mode 1 */
719 mode2_task, /* mode 2 */
720		0,		/* mode 3 */
721 0xFFFF, /* mode 4/5 */
722 0xFFFF, /* mode 4/5 */
723 };
724
725 /*
726	 * Modes 0, 1, 2 and 4/5 share a common field layout on page 0
727	 * for the first 14 bytes.
728 */
729 for (i = 0; i < 3; i++) {
730 moffs = i * LSEQ_MODE_SCRATCH_SIZE;
731 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
732 ret_addr[i]);
733 asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
734 asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
735 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
736 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
737 asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
738 asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
739 }
740 /*
741 * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
742 */
743 asd_write_reg_word(asd_ha,
744 LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
745 ret_addr[5]);
746 asd_write_reg_word(asd_ha,
747 LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
748 asd_write_reg_word(asd_ha,
749 LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
750 asd_write_reg_word(asd_ha,
751 LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
752 asd_write_reg_word(asd_ha,
753 LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
754 asd_write_reg_byte(asd_ha,
755 LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
756 asd_write_reg_word(asd_ha,
757 LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
758
759 /* LSEQ Mode dependent 0, page 0 setup. */
760 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
761 (u16)asd_ha->hw_prof.max_ddbs);
762 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
763 asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
764 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
765 (u16)last_scb_site_no+1);
766 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
767 (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
768 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
769 (u16) LmM0INTEN_MASK & 0xFFFF);
770 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
771 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
772 asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
773 asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
774 asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);
775
776 /* LSEQ mode dependent, mode 1, page 0 setup. */
777 asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
778 asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
779 asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
780 asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
781 asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
782 asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
783 asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
784 asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);
785
786 /* LSEQ Mode dependent mode 2, page 0 setup */
787 asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
788 asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
789 asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
790 asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
791 asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
792 asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);
793
794 /* LSEQ Mode dependent, mode 4/5, page 0 setup. */
795 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
796 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
797 asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
798 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
799 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
800 asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
801 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
802 asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
803 asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
804 /*
805 * Set the desired interval between transmissions of the NOTIFY
806	 * (ENABLE SPINUP) primitive. Must be initialized to val - 1.
807 */
808 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
809 ASD_NOTIFY_TIMEOUT - 1);
810 /* No delay for the first NOTIFY to be sent to the attached target. */
811 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
812 ASD_NOTIFY_DOWN_COUNT);
813
814 /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
815 for (i = 0; i < 2; i++) {
816 int j;
817 /* Start from Page 1 of Mode 0 and 1. */
818 moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
819		/* All the fields of page 1 can be initialized to 0. */
820 for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
821 asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
822 }
823
824 /* LSEQ Mode dependent, mode 2, page 1 setup. */
825 asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
826 asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
827 asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);
828
829 /* LSEQ Mode dependent, mode 4/5, page 1. */
830 for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
831 asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
832 asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
833 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
834 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
835 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
836 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
837 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
838 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
839 asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);
840
841 /* LSEQ Mode dependent, mode 0, page 2 setup. */
842 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
843 asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
844 asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
845 asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
846 asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);
847
848 /* LSEQ Mode Dependent 1, page 2 setup. */
849 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
850 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
851 asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
852 asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
853 asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);
854
855 /* LSEQ Mode Dependent 2, page 2 setup. */
856 /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
857 * i.e. always 0. */
858 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
859 asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
860 asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
861 asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
862 asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
863 asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);
864
865 /* LSEQ Mode Dependent 4/5, page 2 setup. */
866 asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
867 asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
868 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
869 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
870}
871
872/**
873 * asd_init_lseq_scratch -- setup and init link sequencers
874 * @asd_ha: pointer to host adapter struct
875 */
876static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
877{
878 u8 lseq;
879 u8 lseq_mask;
880
881 lseq_mask = asd_ha->hw_prof.enabled_phys;
882 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
883 asd_init_lseq_mip(asd_ha, lseq);
884 asd_init_lseq_mdp(asd_ha, lseq);
885 }
886}
887
888/**
889 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
890 * @asd_ha: pointer to host adapter structure
891 *
892 * This should be done before initializing common CSEQ and LSEQ
893 * scratch since those areas depend on some computed values here,
894 * last_scb_site_no, etc.
895 */
896static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
897{
898 u16 site_no;
899 u16 max_scbs = 0;
900
901 for (site_no = asd_ha->hw_prof.max_scbs-1;
902 site_no != (u16) -1;
903 site_no--) {
904 u16 i;
905
906 /* Initialize all fields in the SCB site to 0. */
907 for (i = 0; i < ASD_SCB_SIZE; i += 4)
908 asd_scbsite_write_dword(asd_ha, site_no, i, 0);
909
910		/* A workaround needed by the sequencer for a SATA issue is to
911		 * exclude certain SCB sites from the free list. */
912 if (!SCB_SITE_VALID(site_no))
913 continue;
914
915 if (last_scb_site_no == 0)
916 last_scb_site_no = site_no;
917
918 /* For every SCB site, we need to initialize the
919 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
920 * and SG Element Flag. */
921
922 /* Q_NEXT field of the last SCB is invalidated. */
923 asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
924
925 /* Initialize SCB Site Opcode field to invalid. */
926 asd_scbsite_write_byte(asd_ha, site_no,
927 offsetof(struct scb_header, opcode),
928 0xFF);
929
930		/* Initialize the SCB site flags field to mean a response
931		 * frame has been received, which causes inadvertently
932		 * received frames to be dropped. */
933 asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
934
935 first_scb_site_no = site_no;
936 max_scbs++;
937 }
938 asd_ha->hw_prof.max_scbs = max_scbs;
939 ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
940 ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
941 ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
942}
943
944/**
945 * asd_init_cseq_cio - initialize CSEQ CIO registers
946 * @asd_ha: pointer to host adapter structure
947 */
948static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
949{
950 int i;
951
952 asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
953 asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
954 asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
955 asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
956 asd_ha->seq.scbpro = 0;
957 asd_write_reg_dword(asd_ha, SCBPRO, 0);
958 asd_write_reg_dword(asd_ha, CSEQCON, 0);
959
960	/* Initialize CSEQ Mode 11 Interrupt Vectors.
961 * The addresses are 16 bit wide and in dword units.
962 * The values of their macros are in byte units.
963 * Thus we have to divide by 4. */
964 asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
965 asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
966 asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);
967
968 /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
969 asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);
970
971 /* Initialize CSEQ Scratch Page to 0x04. */
972 asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);
973
974 /* Initialize CSEQ Mode[0-8] Dependent registers. */
975 /* Initialize Scratch Page to 0. */
976 for (i = 0; i < 9; i++)
977 asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);
978
979 /* Reset the ARP2 Program Count. */
980 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
981
982 for (i = 0; i < 8; i++) {
983		/* Initialize Mode n Link m Interrupt Enable. */
984 asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
985 /* Initialize Mode n Request Mailbox. */
986 asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
987 }
988}
989
990/**
991 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
992 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
993 */
994static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
995{
996 u8 *sas_addr;
997 int i;
998
999 /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
1000 asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);
1001
1002 asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);
1003
1004 /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
1005 for (i = 0; i < 3; i++)
1006 asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);
1007
1008 /* Initialize Mode 5 SCRATCHPAGE to 0. */
1009 asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);
1010
1011 asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
1012 /* Initialize Mode 0,1,2 and 5 Interrupt Enable and
1013 * Interrupt registers. */
1014 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
1015 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
1016 /* Mode 1 */
1017 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
1018 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
1019 /* Mode 2 */
1020 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
1021 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
1022 /* Mode 5 */
1023 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
1024 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);
1025
1026 /* Enable HW Timer status. */
1027 asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);
1028
1029 /* Enable Primitive Status 0 and 1. */
1030 asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
1031 asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);
1032
1033 /* Enable Frame Error. */
1034 asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
1035 asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);
1036
1037 /* Initialize Mode 0 Transfer Level to 512. */
1038 asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
1039 /* Initialize Mode 1 Transfer Level to 256. */
1040 asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);
1041
1042 /* Initialize Program Count. */
1043 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1044
1045 /* Enable Blind SG Move. */
1046 asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
1047 asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
1048 ASD_SATA_INTERLOCK_TIMEOUT);
1049
1050 (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));
1051
1052 /* Clear Primitive Status 0 and 1. */
1053 asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
1054 asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);
1055
1056 /* Clear HW Timer status. */
1057 asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);
1058
1059 /* Clear DMA Errors for Mode 0 and 1. */
1060 asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
1061 asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);
1062
1063 /* Clear SG DMA Errors for Mode 0 and 1. */
1064 asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
1065 asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);
1066
1067 /* Clear Mode 0 Buffer Parity Error. */
1068 asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);
1069
1070 /* Clear Mode 0 Frame Error register. */
1071 asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);
1072
1073 /* Reset LSEQ external interrupt arbiter. */
1074 asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);
1075
1076	/* Set the phy's SAS address into the LmSEQ WWN registers. */
1077 sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
1078 for (i = 0; i < SAS_ADDR_SIZE; i++)
1079 asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);
1080
1081 /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
1082 asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);
1083
1084 /* Set the Bus Inactivity Time Limit Timer. */
1085 asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);
1086
1087 /* Enable SATA Port Multiplier. */
1088 asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);
1089
1090 /* Initialize Interrupt Vector[0-10] address in Mode 3.
1091 * See the comment on CSEQ_INT_* */
1092 asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
1093 asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
1094 asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
1095 asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
1096 asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
1097 asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
1098 asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
1099 asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
1100 asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
1101 asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
1102 asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
1103 /*
1104 * Program the Link LED control, applicable only for
1105 * Chip Rev. B or later.
1106 */
1107 asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
1108 (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));
1109
1110 /* Set the Align Rate for SAS and STP mode. */
1111 asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
1112 asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
1113}
1114
1115
1116/**
1117 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
1118 * @asd_ha: pointer to host adapter struct
1119 */
1120static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
1121{
1122 int i;
1123
1124 for (i = 0; i < 8; i++)
1125 asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
1126 for (i = 0; i < 8; i++)
1127 asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
1128 /* Reset the external interrupt arbiter. */
1129 asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
1130}
1131
1132/**
1133 * asd_init_ddb_0 -- initialize DDB 0
1134 * @asd_ha: pointer to host adapter structure
1135 *
1136 * Initialize DDB site 0 which is used internally by the sequencer.
1137 */
1138static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
1139{
1140 int i;
1141
1142 /* Zero out the DDB explicitly */
1143 for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
1144 asd_ddbsite_write_dword(asd_ha, 0, i, 0);
1145
1146 asd_ddbsite_write_word(asd_ha, 0,
1147 offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
1148 asd_ddbsite_write_word(asd_ha, 0,
1149 offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
1150 asd_ha->hw_prof.max_ddbs-1);
1151 asd_ddbsite_write_word(asd_ha, 0,
1152 offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
1153 asd_ddbsite_write_word(asd_ha, 0,
1154 offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
1155 asd_ddbsite_write_word(asd_ha, 0,
1156 offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
1157 asd_ddbsite_write_word(asd_ha, 0,
1158 offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
1159 asd_ddbsite_write_word(asd_ha, 0,
1160 offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
1161 asd_ddbsite_write_word(asd_ha, 0,
1162 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
1163 asd_ddbsite_write_word(asd_ha, 0,
1164 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
1165 asd_ha->hw_prof.num_phys * 2);
1166 asd_ddbsite_write_byte(asd_ha, 0,
1167 offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
1168 asd_ddbsite_write_byte(asd_ha, 0,
1169 offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
1170 asd_ddbsite_write_byte(asd_ha, 0,
1171 offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
1172 /* DDB 0 is reserved */
1173 set_bit(0, asd_ha->hw_prof.ddb_bitmap);
1174}
1175
1176/**
1177 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
1178 * @asd_ha: pointer to host adapter structure
1179 */
1180static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
1181{
1182 int lseq;
1183 u8 lseq_mask;
1184
1185 /* Initialize SCB sites. Done first to compute some values which
1186 * the rest of the init code depends on. */
1187 asd_init_scb_sites(asd_ha);
1188
1189 /* Initialize CSEQ Scratch RAM registers. */
1190 asd_init_cseq_scratch(asd_ha);
1191
1192 /* Initialize LmSEQ Scratch RAM registers. */
1193 asd_init_lseq_scratch(asd_ha);
1194
1195 /* Initialize CSEQ CIO registers. */
1196 asd_init_cseq_cio(asd_ha);
1197
1198 asd_init_ddb_0(asd_ha);
1199
1200 /* Initialize LmSEQ CIO registers. */
1201 lseq_mask = asd_ha->hw_prof.enabled_phys;
1202 for_each_sequencer(lseq_mask, lseq_mask, lseq)
1203 asd_init_lseq_cio(asd_ha, lseq);
1204 asd_post_init_cseq(asd_ha);
1205}
1206
1207
1208/**
1209 * asd_seq_start_cseq -- start the central sequencer, CSEQ
1210 * @asd_ha: pointer to host adapter structure
1211 */
1212static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
1213{
1214 /* Reset the ARP2 instruction to location zero. */
1215 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
1216
1217 /* Unpause the CSEQ */
1218 return asd_unpause_cseq(asd_ha);
1219}
1220
1221/**
1222 * asd_seq_start_lseq -- start a link sequencer
1223 * @asd_ha: pointer to host adapter structure
1224 * @lseq: the link sequencer of interest
1225 */
1226static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
1227{
1228 /* Reset the ARP2 instruction to location zero. */
1229 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1230
1231 /* Unpause the LmSEQ */
1232 return asd_seq_unpause_lseq(asd_ha, lseq);
1233}
1234
1235static int asd_request_firmware(struct asd_ha_struct *asd_ha)
1236{
1237 int err, i;
1238 struct sequencer_file_header header, *hdr_ptr;
1239 u32 csum = 0;
1240 u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
1241
1242 if (sequencer_fw)
1243 /* already loaded */
1244 return 0;
1245
1246 err = request_firmware(&sequencer_fw,
1247 SAS_RAZOR_SEQUENCER_FW_FILE,
1248 &asd_ha->pcidev->dev);
1249 if (err)
1250 return err;
1251
1252 hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data;
1253
1254 header.csum = le32_to_cpu(hdr_ptr->csum);
1255 header.major = le32_to_cpu(hdr_ptr->major);
1256 header.minor = le32_to_cpu(hdr_ptr->minor);
1257 sequencer_version = hdr_ptr->version;
1258 header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
1259 header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
1260 header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
1261 header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
1262 header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
1263 header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
1264 header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
1265 header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
1266 header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
1267 header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
1268 header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);
1269
1270 for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
1271 csum += sequencer_fw->data[i];
1272
1273 if (csum != header.csum) {
1274 asd_printk("Firmware file checksum mismatch\n");
1275 return -EINVAL;
1276 }
1277
1278 if (header.cseq_table_size != CSEQ_NUM_VECS ||
1279 header.lseq_table_size != LSEQ_NUM_VECS) {
1280 asd_printk("Firmware file table size mismatch\n");
1281 return -EINVAL;
1282 }
1283
1284 ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
1285 ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
1286 mode2_task = header.mode2_task;
1287 cseq_idle_loop = header.cseq_idle_loop;
1288 lseq_idle_loop = header.lseq_idle_loop;
1289
1290 for (i = 0; i < CSEQ_NUM_VECS; i++)
1291 cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);
1292
1293 for (i = 0; i < LSEQ_NUM_VECS; i++)
1294 lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);
1295
1296 cseq_code = &sequencer_fw->data[header.cseq_code_offset];
1297 cseq_code_size = header.cseq_code_size;
1298 lseq_code = &sequencer_fw->data[header.lseq_code_offset];
1299 lseq_code_size = header.lseq_code_size;
1300
1301 return 0;
1302}
1303
1304int asd_init_seqs(struct asd_ha_struct *asd_ha)
1305{
1306 int err;
1307
1308 err = asd_request_firmware(asd_ha);
1309
1310 if (err) {
1311 asd_printk("Failed to load sequencer firmware file %s, error %d\n",
1312 SAS_RAZOR_SEQUENCER_FW_FILE, err);
1313 return err;
1314 }
1315
1316 asd_printk("using sequencer %s\n", sequencer_version);
1317 err = asd_seq_download_seqs(asd_ha);
1318 if (err) {
1319 asd_printk("couldn't download sequencers for %s\n",
1320 pci_name(asd_ha->pcidev));
1321 return err;
1322 }
1323
1324 asd_seq_setup_seqs(asd_ha);
1325
1326 return 0;
1327}
1328
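/* Called after asd_init_seqs() above has downloaded the microcode and
 * set up scratch RAM: starting a sequencer just resets its program
 * counter to the idle loop and unpauses it. */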
1329int asd_start_seqs(struct asd_ha_struct *asd_ha)
1330{
1331 int err;
1332 u8 lseq_mask;
1333 int lseq;
1334
1335 err = asd_seq_start_cseq(asd_ha);
1336 if (err) {
1337 asd_printk("couldn't start CSEQ for %s\n",
1338 pci_name(asd_ha->pcidev));
1339 return err;
1340 }
1341
1342 lseq_mask = asd_ha->hw_prof.enabled_phys;
1343 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
1344 err = asd_seq_start_lseq(asd_ha, lseq);
1345 if (err) {
1346			asd_printk("couldn't start LSEQ %d for %s\n", lseq,
1347 pci_name(asd_ha->pcidev));
1348 return err;
1349 }
1350 }
1351
1352 return 0;
1353}
1354
1355/**
1356 * asd_update_port_links -- update port_map_by_links and phy_is_up
1357 * @sas_phy: pointer to the phy which has been added to a port
1358 *
1359 * 1) When a link reset has completed and we got BYTES DMAED with a
1360 * valid frame we call this function for that phy, to indicate that
1361 * the phy is up, i.e. we update the phy_is_up in DDB 0. The
1362 * sequencer checks phy_is_up when pending SCBs are to be sent, and
1363 * when an open address frame has been received.
1364 *
1365 * 2) When a port has been formed, we call this function to update the
1366 * map of phys participating in that port, i.e. we update the
1367 * port_map_by_links in DDB 0. When a HARD_RESET primitive has been
1368 * received, the sequencer disables all phys in that port.
1369 * port_map_by_links is also used as the conn_mask byte in the
1370 * initiator/target port DDB.
1371 */
1372void asd_update_port_links(struct asd_sas_phy *sas_phy)
1373{
1374 struct asd_ha_struct *asd_ha = sas_phy->ha->lldd_ha;
1375 const u8 phy_mask = (u8) sas_phy->port->phy_mask;
1376 u8 phy_is_up;
1377 u8 mask;
1378 int i, err;
1379
1380 for_each_phy(phy_mask, mask, i)
1381 asd_ddbsite_write_byte(asd_ha, 0,
1382 offsetof(struct asd_ddb_seq_shared,
1383 port_map_by_links)+i,phy_mask);
1384
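	/* Read-modify-update retry loop for phy_is_up: the update
	 * presumably fails when the byte changed underneath us, and
	 * -EFAULT indicates a parity error in DDB 0. */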
1385 for (i = 0; i < 12; i++) {
1386 phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
1387 offsetof(struct asd_ddb_seq_shared, phy_is_up));
1388 err = asd_ddbsite_update_byte(asd_ha, 0,
1389 offsetof(struct asd_ddb_seq_shared, phy_is_up),
1390 phy_is_up,
1391 phy_is_up | phy_mask);
1392 if (!err)
1393 break;
1394 else if (err == -EFAULT) {
1395 asd_printk("phy_is_up: parity error in DDB 0\n");
1396 break;
1397 }
1398 }
1399
1400 if (err)
1401 asd_printk("couldn't update DDB 0:error:%d\n", err);
1402}
1403
1404MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
new file mode 100644
index 000000000000..42281c36153b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -0,0 +1,70 @@
1/*
2 * Aic94xx SAS/SATA driver sequencer interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_SEQ_H_
28#define _AIC94XX_SEQ_H_
29
30#define CSEQ_NUM_VECS 3
31#define LSEQ_NUM_VECS 11
32
33#define SAS_RAZOR_SEQUENCER_FW_FILE "aic94xx-seq.fw"
34
35/* Note: All quantities in the sequencer file are little endian */
36struct sequencer_file_header {
37 /* Checksum of the entire contents of the sequencer excluding
38 * these four bytes */
39 u32 csum;
40 /* numeric major version */
41 u32 major;
42 /* numeric minor version */
43 u32 minor;
44 /* version string printed by driver */
45 char version[16];
46 u32 cseq_table_offset;
47 u32 cseq_table_size;
48 u32 lseq_table_offset;
49 u32 lseq_table_size;
50 u32 cseq_code_offset;
51 u32 cseq_code_size;
52 u32 lseq_code_offset;
53 u32 lseq_code_size;
54 u16 mode2_task;
55 u16 cseq_idle_loop;
56 u16 lseq_idle_loop;
57} __attribute__((packed));
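
/* The firmware image is laid out as this header followed by the CSEQ
 * and LSEQ vector tables and code blobs at the offsets recorded above;
 * see asd_request_firmware() in aic94xx_seq.c. */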
58
59#ifdef __KERNEL__
60int asd_pause_cseq(struct asd_ha_struct *asd_ha);
61int asd_unpause_cseq(struct asd_ha_struct *asd_ha);
62int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
63int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
64int asd_init_seqs(struct asd_ha_struct *asd_ha);
65int asd_start_seqs(struct asd_ha_struct *asd_ha);
66
67void asd_update_port_links(struct asd_sas_phy *phy);
68#endif
69
70#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
new file mode 100644
index 000000000000..d202ed5a6709
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -0,0 +1,642 @@
1/*
2 * Aic94xx SAS/SATA Tasks
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/spinlock.h>
28#include "aic94xx.h"
29#include "aic94xx_sas.h"
30#include "aic94xx_hwi.h"
31
32static void asd_unbuild_ata_ascb(struct asd_ascb *a);
33static void asd_unbuild_smp_ascb(struct asd_ascb *a);
34static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
35
36static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
37{
38 unsigned long flags;
39
40 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
41 asd_ha->seq.can_queue += num;
42 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
43}
44
45/* PCI_DMA_... to our direction translation.
46 */
47static const u8 data_dir_flags[] = {
48 [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
49 [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
50 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
51 [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
52};
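
/* Used when building SCBs, e.g. scb->ssp_task.data_dir =
 * data_dir_flags[task->data_dir] in asd_build_ssp_ascb() below. */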
53
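/* Map a task's scatterlist into the SCB's three inline SG elements.
 * Up to three entries are written inline, the last flagged EOL.  With
 * more than three, the full list is built in a separately allocated
 * coherent buffer: the first two entries stay inline, the second is
 * flagged EOS, and the third inline element carries the bus address of
 * the external list. */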
54static inline int asd_map_scatterlist(struct sas_task *task,
55 struct sg_el *sg_arr,
56 gfp_t gfp_flags)
57{
58 struct asd_ascb *ascb = task->lldd_task;
59 struct asd_ha_struct *asd_ha = ascb->ha;
60 struct scatterlist *sc;
61 int num_sg, res;
62
63 if (task->data_dir == PCI_DMA_NONE)
64 return 0;
65
66 if (task->num_scatter == 0) {
67 void *p = task->scatter;
68 dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
69 task->total_xfer_len,
70 task->data_dir);
71 sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
72 sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
73 sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
74 return 0;
75 }
76
77 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
78 task->data_dir);
79 if (num_sg == 0)
80 return -ENOMEM;
81
82 if (num_sg > 3) {
83 int i;
84
85 ascb->sg_arr = asd_alloc_coherent(asd_ha,
86 num_sg*sizeof(struct sg_el),
87 gfp_flags);
88 if (!ascb->sg_arr) {
89 res = -ENOMEM;
90 goto err_unmap;
91 }
92 for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
93 struct sg_el *sg =
94 &((struct sg_el *)ascb->sg_arr->vaddr)[i];
95 sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
96 sg->size = cpu_to_le32((u32)sg_dma_len(sc));
97 if (i == num_sg-1)
98 sg->flags |= ASD_SG_EL_LIST_EOL;
99 }
100
101 for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
102 sg_arr[i].bus_addr =
103 cpu_to_le64((u64)sg_dma_address(sc));
104 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
105 }
106 sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
107 sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;
108
109 memset(&sg_arr[2], 0, sizeof(*sg_arr));
110 sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
111 } else {
112 int i;
113 for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
114 sg_arr[i].bus_addr =
115 cpu_to_le64((u64)sg_dma_address(sc));
116 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
117 }
118 sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
119 }
120
121 return 0;
122err_unmap:
123 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
124 task->data_dir);
125 return res;
126}
127
128static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
129{
130 struct asd_ha_struct *asd_ha = ascb->ha;
131 struct sas_task *task = ascb->uldd_task;
132
133 if (task->data_dir == PCI_DMA_NONE)
134 return;
135
136 if (task->num_scatter == 0) {
137 dma_addr_t dma = (dma_addr_t)
138 le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
139 pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
140 task->data_dir);
141 return;
142 }
143
144 asd_free_coherent(asd_ha, ascb->sg_arr);
145 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
146 task->data_dir);
147}
148
149/* ---------- Task complete tasklet ---------- */
150
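/* Copy an SSP or ATA response from its empty data buffer (EDB) into
 * the task status.  The done-list status block names the owning ESCB
 * and the EDB slot within it; the EDB is invalidated once the response
 * has been consumed. */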
151static void asd_get_response_tasklet(struct asd_ascb *ascb,
152 struct done_list_struct *dl)
153{
154 struct asd_ha_struct *asd_ha = ascb->ha;
155 struct sas_task *task = ascb->uldd_task;
156 struct task_status_struct *ts = &task->task_status;
157 unsigned long flags;
158 struct tc_resp_sb_struct {
159 __le16 index_escb;
160 u8 len_lsb;
161 u8 flags;
162 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
163
164/* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
165 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
166 struct asd_ascb *escb;
167 struct asd_dma_tok *edb;
168 void *r;
169
170 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
171 escb = asd_tc_index_find(&asd_ha->seq,
172 (int)le16_to_cpu(resp_sb->index_escb));
173 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
174
175 if (!escb) {
176 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
177 return;
178 }
179
180 ts->buf_valid_size = 0;
181 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
182 r = edb->vaddr;
183 if (task->task_proto == SAS_PROTO_SSP) {
184 struct ssp_response_iu *iu =
185 r + 16 + sizeof(struct ssp_frame_hdr);
186
187 ts->residual = le32_to_cpu(*(__le32 *)r);
188 ts->resp = SAS_TASK_COMPLETE;
189 if (iu->datapres == 0)
190 ts->stat = iu->status;
191 else if (iu->datapres == 1)
192 ts->stat = iu->resp_data[3];
193 else if (iu->datapres == 2) {
194 ts->stat = SAM_CHECK_COND;
195 ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE,
196 be32_to_cpu(iu->sense_data_len));
197 memcpy(ts->buf, iu->sense_data, ts->buf_valid_size);
198 if (iu->status != SAM_CHECK_COND) {
199 ASD_DPRINTK("device %llx sent sense data, but "
200 "stat(0x%x) is not CHECK_CONDITION"
201 "\n",
202 SAS_ADDR(task->dev->sas_addr),
203 ts->stat);
204 }
205 }
206 } else {
207 struct ata_task_resp *resp = (void *) &ts->buf[0];
208
209 ts->residual = le32_to_cpu(*(__le32 *)r);
210
211 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
212 resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
213 memcpy(&resp->ending_fis[0], r+16, 24);
214 ts->buf_valid_size = sizeof(*resp);
215 }
216 }
217
218 asd_invalidate_edb(escb, edb_id);
219}
220
221static void asd_task_tasklet_complete(struct asd_ascb *ascb,
222 struct done_list_struct *dl)
223{
224 struct sas_task *task = ascb->uldd_task;
225 struct task_status_struct *ts = &task->task_status;
226 unsigned long flags;
227 u8 opcode = dl->opcode;
228
229 asd_can_dequeue(ascb->ha, 1);
230
231Again:
232 switch (opcode) {
233 case TC_NO_ERROR:
234 ts->resp = SAS_TASK_COMPLETE;
235 ts->stat = SAM_GOOD;
236 break;
237 case TC_UNDERRUN:
238 ts->resp = SAS_TASK_COMPLETE;
239 ts->stat = SAS_DATA_UNDERRUN;
240 ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
241 break;
242 case TC_OVERRUN:
243 ts->resp = SAS_TASK_COMPLETE;
244 ts->stat = SAS_DATA_OVERRUN;
245 ts->residual = 0;
246 break;
247 case TC_SSP_RESP:
248 case TC_ATA_RESP:
249 ts->resp = SAS_TASK_COMPLETE;
250 ts->stat = SAS_PROTO_RESPONSE;
251 asd_get_response_tasklet(ascb, dl);
252 break;
253 case TF_OPEN_REJECT:
254 ts->resp = SAS_TASK_UNDELIVERED;
255 ts->stat = SAS_OPEN_REJECT;
256 if (dl->status_block[1] & 2)
257 ts->open_rej_reason = 1 + dl->status_block[2];
258 else if (dl->status_block[1] & 1)
259 ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
260 else
261 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
262 break;
263 case TF_OPEN_TO:
264 ts->resp = SAS_TASK_UNDELIVERED;
265 ts->stat = SAS_OPEN_TO;
266 break;
267 case TF_PHY_DOWN:
268 case TU_PHY_DOWN:
269 ts->resp = SAS_TASK_UNDELIVERED;
270 ts->stat = SAS_PHY_DOWN;
271 break;
272 case TI_PHY_DOWN:
273 ts->resp = SAS_TASK_COMPLETE;
274 ts->stat = SAS_PHY_DOWN;
275 break;
276 case TI_BREAK:
277 case TI_PROTO_ERR:
278 case TI_NAK:
279 case TI_ACK_NAK_TO:
280 case TF_SMP_XMIT_RCV_ERR:
281 case TC_ATA_R_ERR_RECV:
282 ts->resp = SAS_TASK_COMPLETE;
283 ts->stat = SAS_INTERRUPTED;
284 break;
285 case TF_BREAK:
286 case TU_BREAK:
287 case TU_ACK_NAK_TO:
288 case TF_SMPRSP_TO:
289 ts->resp = SAS_TASK_UNDELIVERED;
290 ts->stat = SAS_DEV_NO_RESPONSE;
291 break;
292 case TF_NAK_RECV:
293 ts->resp = SAS_TASK_COMPLETE;
294 ts->stat = SAS_NAK_R_ERR;
295 break;
296 case TA_I_T_NEXUS_LOSS:
297 opcode = dl->status_block[0];
298 goto Again;
299 break;
300 case TF_INV_CONN_HANDLE:
301 ts->resp = SAS_TASK_UNDELIVERED;
302 ts->stat = SAS_DEVICE_UNKNOWN;
303 break;
304 case TF_REQUESTED_N_PENDING:
305 ts->resp = SAS_TASK_UNDELIVERED;
306 ts->stat = SAS_PENDING;
307 break;
308 case TC_TASK_CLEARED:
309 case TA_ON_REQ:
310 ts->resp = SAS_TASK_COMPLETE;
311 ts->stat = SAS_ABORTED_TASK;
312 break;
313
314 case TF_NO_SMP_CONN:
315 case TF_TMF_NO_CTX:
316 case TF_TMF_NO_TAG:
317 case TF_TMF_TAG_FREE:
318 case TF_TMF_TASK_DONE:
319 case TF_TMF_NO_CONN_HANDLE:
320 case TF_IRTT_TO:
321 case TF_IU_SHORT:
322 case TF_DATA_OFFS_ERR:
323 ts->resp = SAS_TASK_UNDELIVERED;
324 ts->stat = SAS_DEV_NO_RESPONSE;
325 break;
326
327 case TC_LINK_ADM_RESP:
328 case TC_CONTROL_PHY:
329 case TC_RESUME:
330 case TC_PARTIAL_SG_LIST:
331 default:
332 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
333 break;
334 }
335
336 switch (task->task_proto) {
337 case SATA_PROTO:
338 case SAS_PROTO_STP:
339 asd_unbuild_ata_ascb(ascb);
340 break;
341 case SAS_PROTO_SMP:
342 asd_unbuild_smp_ascb(ascb);
343 break;
344 case SAS_PROTO_SSP:
345 asd_unbuild_ssp_ascb(ascb);
346 default:
347 break;
348 }
349
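	/* If the upper layer has already marked the task aborted, only
	 * signal the waiter via ascb->completion; otherwise free the
	 * ascb and invoke task_done(), with mb() presumably ordering
	 * the free against the callback. */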
350 spin_lock_irqsave(&task->task_state_lock, flags);
351 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
352 task->task_state_flags |= SAS_TASK_STATE_DONE;
353 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
354 spin_unlock_irqrestore(&task->task_state_lock, flags);
355 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
356 "stat 0x%x but aborted by upper layer!\n",
357 task, opcode, ts->resp, ts->stat);
358 complete(&ascb->completion);
359 } else {
360 spin_unlock_irqrestore(&task->task_state_lock, flags);
361 task->lldd_task = NULL;
362 asd_ascb_free(ascb);
363 mb();
364 task->task_done(task);
365 }
366}
367
368/* ---------- ATA ---------- */
369
370static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
371 gfp_t gfp_flags)
372{
373 struct domain_device *dev = task->dev;
374 struct scb *scb;
375 u8 flags;
376 int res = 0;
377
378 scb = ascb->scb;
379
380 if (unlikely(task->ata_task.device_control_reg_update))
381 scb->header.opcode = CONTROL_ATA_DEV;
382 else if (dev->sata_dev.command_set == ATA_COMMAND_SET)
383 scb->header.opcode = INITIATE_ATA_TASK;
384 else
385 scb->header.opcode = INITIATE_ATAPI_TASK;
386
387 scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
388 if (dev->port->oob_mode == SAS_OOB_MODE)
389 scb->ata_task.proto_conn_rate |= dev->linkrate;
390
391 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
392 scb->ata_task.fis = task->ata_task.fis;
393	scb->ata_task.fis.fis_type = 0x27;	/* register host-to-device FIS */
394 if (likely(!task->ata_task.device_control_reg_update))
395 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
396 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
397 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
398 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
399 16);
400 scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
401 scb->ata_task.conn_handle = cpu_to_le16(
402 (u16)(unsigned long)dev->lldd_dev);
403
404 if (likely(!task->ata_task.device_control_reg_update)) {
405 flags = 0;
406 if (task->ata_task.dma_xfer)
407 flags |= DATA_XFER_MODE_DMA;
408 if (task->ata_task.use_ncq &&
409 dev->sata_dev.command_set != ATAPI_COMMAND_SET)
410 flags |= ATA_Q_TYPE_NCQ;
411 flags |= data_dir_flags[task->data_dir];
412 scb->ata_task.ata_flags = flags;
413
414 scb->ata_task.retry_count = task->ata_task.retry_count;
415
416 flags = 0;
417 if (task->ata_task.set_affil_pol)
418 flags |= SET_AFFIL_POLICY;
419 if (task->ata_task.stp_affil_pol)
420 flags |= STP_AFFIL_POLICY;
421 scb->ata_task.flags = flags;
422 }
423 ascb->tasklet_complete = asd_task_tasklet_complete;
424
425 if (likely(!task->ata_task.device_control_reg_update))
426 res = asd_map_scatterlist(task, scb->ata_task.sg_element,
427 gfp_flags);
428
429 return res;
430}
431
432static void asd_unbuild_ata_ascb(struct asd_ascb *a)
433{
434 asd_unmap_scatterlist(a);
435}
436
437/* ---------- SMP ---------- */
438
439static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
440 gfp_t gfp_flags)
441{
442 struct asd_ha_struct *asd_ha = ascb->ha;
443 struct domain_device *dev = task->dev;
444 struct scb *scb;
445
446	pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
447		   PCI_DMA_TODEVICE);	/* the SMP request flows host to device */
448	pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
449		   PCI_DMA_FROMDEVICE);
450
451 scb = ascb->scb;
452
453 scb->header.opcode = INITIATE_SMP_TASK;
454
455 scb->smp_task.proto_conn_rate = dev->linkrate;
456
457 scb->smp_task.smp_req.bus_addr =
458 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
459 scb->smp_task.smp_req.size =
460 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
461
462 scb->smp_task.smp_resp.bus_addr =
463 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
464 scb->smp_task.smp_resp.size =
465 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
466
467 scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
468 scb->smp_task.conn_handle = cpu_to_le16((u16)
469 (unsigned long)dev->lldd_dev);
470
471 ascb->tasklet_complete = asd_task_tasklet_complete;
472
473 return 0;
474}
475
476static void asd_unbuild_smp_ascb(struct asd_ascb *a)
477{
478 struct sas_task *task = a->uldd_task;
479
480 BUG_ON(!task);
481	pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
482		     PCI_DMA_TODEVICE);	/* matches the mapping direction */
483 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
484 PCI_DMA_FROMDEVICE);
485}
486
487/* ---------- SSP ---------- */
488
489static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
490 gfp_t gfp_flags)
491{
492 struct domain_device *dev = task->dev;
493 struct scb *scb;
494 int res = 0;
495
496 scb = ascb->scb;
497
498 scb->header.opcode = INITIATE_SSP_TASK;
499
500 scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */
501 scb->ssp_task.proto_conn_rate |= dev->linkrate;
502 scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
503 scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
504 memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
505 HASHED_SAS_ADDR_SIZE);
506 memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
507 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
508 scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
509
510 memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
511 if (task->ssp_task.enable_first_burst)
512 scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
513 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
514 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
515 memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16);
516
517 scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
518 scb->ssp_task.conn_handle = cpu_to_le16(
519 (u16)(unsigned long)dev->lldd_dev);
520 scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
521	scb->ssp_task.retry_count = task->ssp_task.retry_count;
522
523 ascb->tasklet_complete = asd_task_tasklet_complete;
524
525 res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
526
527 return res;
528}
529
530static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
531{
532 asd_unmap_scatterlist(a);
533}
534
535/* ---------- Execute Task ---------- */
536
537static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
538{
539 int res = 0;
540 unsigned long flags;
541
542 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
543 if ((asd_ha->seq.can_queue - num) < 0)
544 res = -SAS_QUEUE_FULL;
545 else
546 asd_ha->seq.can_queue -= num;
547 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
548
549 return res;
550}
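/*
 * asd_can_dequeue(), defined elsewhere in the driver, is the inverse
 * operation.  A minimal sketch, assuming it only has to return the
 * slots under the same lock:
 *
 *	static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha,
 *					   int num)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
 *		asd_ha->seq.can_queue += num;
 *		spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
 *	}
 */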
551
552int asd_execute_task(struct sas_task *task, const int num,
553 gfp_t gfp_flags)
554{
555 int res = 0;
556 LIST_HEAD(alist);
557 struct sas_task *t = task;
558 struct asd_ascb *ascb = NULL, *a;
559 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
560
561 res = asd_can_queue(asd_ha, num);
562 if (res)
563 return res;
564
565 res = num;
566 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
567 if (res) {
568 res = -ENOMEM;
569 goto out_err;
570 }
571
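	/* Temporarily splice the on-stack head &alist into the circular
	 * chain of allocated ascbs so the chain can be walked with
	 * list_for_each_entry(); list_del_init(&alist) unsplices it. */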
572 __list_add(&alist, ascb->list.prev, &ascb->list);
573 list_for_each_entry(a, &alist, list) {
574 a->uldd_task = t;
575 t->lldd_task = a;
576 t = list_entry(t->list.next, struct sas_task, list);
577 }
578 list_for_each_entry(a, &alist, list) {
579 t = a->uldd_task;
580 a->uldd_timer = 1;
581 if (t->task_proto & SAS_PROTO_STP)
582 t->task_proto = SAS_PROTO_STP;
583 switch (t->task_proto) {
584 case SATA_PROTO:
585 case SAS_PROTO_STP:
586 res = asd_build_ata_ascb(a, t, gfp_flags);
587 break;
588 case SAS_PROTO_SMP:
589 res = asd_build_smp_ascb(a, t, gfp_flags);
590 break;
591 case SAS_PROTO_SSP:
592 res = asd_build_ssp_ascb(a, t, gfp_flags);
593 break;
594 default:
595 asd_printk("unknown sas_task proto: 0x%x\n",
596 t->task_proto);
597 res = -ENOMEM;
598 break;
599 }
600 if (res)
601 goto out_err_unmap;
602 }
603 list_del_init(&alist);
604
605 res = asd_post_ascb_list(asd_ha, ascb, num);
606 if (unlikely(res)) {
607 a = NULL;
608 __list_add(&alist, ascb->list.prev, &ascb->list);
609 goto out_err_unmap;
610 }
611
612 return 0;
613out_err_unmap:
614 {
615 struct asd_ascb *b = a;
616 list_for_each_entry(a, &alist, list) {
617 if (a == b)
618 break;
619 t = a->uldd_task;
620 switch (t->task_proto) {
621 case SATA_PROTO:
622 case SAS_PROTO_STP:
623 asd_unbuild_ata_ascb(a);
624 break;
625 case SAS_PROTO_SMP:
626 asd_unbuild_smp_ascb(a);
627 break;
628 case SAS_PROTO_SSP:
629			asd_unbuild_ssp_ascb(a);	/* fall through */
630 default:
631 break;
632 }
633 t->lldd_task = NULL;
634 }
635 }
636 list_del_init(&alist);
637out_err:
638 if (ascb)
639 asd_ascb_free_list(ascb);
640 asd_can_dequeue(asd_ha, num);
641 return res;
642}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
new file mode 100644
index 000000000000..61234384503b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -0,0 +1,636 @@
1/*
2 * Aic94xx Task Management Functions
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/spinlock.h>
28#include "aic94xx.h"
29#include "aic94xx_sas.h"
30#include "aic94xx_hwi.h"
31
32/* ---------- Internal enqueue ---------- */
33
34static int asd_enqueue_internal(struct asd_ascb *ascb,
35 void (*tasklet_complete)(struct asd_ascb *,
36 struct done_list_struct *),
37 void (*timed_out)(unsigned long))
38{
39 int res;
40
41 ascb->tasklet_complete = tasklet_complete;
42 ascb->uldd_timer = 1;
43
44 ascb->timer.data = (unsigned long) ascb;
45 ascb->timer.function = timed_out;
46 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
47
48 add_timer(&ascb->timer);
49
50 res = asd_post_ascb_list(ascb->ha, ascb, 1);
51 if (unlikely(res))
52 del_timer(&ascb->timer);
53 return res;
54}
55
56static inline void asd_timedout_common(unsigned long data)
57{
58 struct asd_ascb *ascb = (void *) data;
59 struct asd_seq_data *seq = &ascb->ha->seq;
60 unsigned long flags;
61
62 spin_lock_irqsave(&seq->pend_q_lock, flags);
63 seq->pending--;
64 list_del_init(&ascb->list);
65 spin_unlock_irqrestore(&seq->pend_q_lock, flags);
66}
67
68/* ---------- CLEAR NEXUS ---------- */
69
70static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
71 struct done_list_struct *dl)
72{
73 ASD_DPRINTK("%s: here\n", __FUNCTION__);
74 if (!del_timer(&ascb->timer)) {
75 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
76 return;
77 }
78 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
79 ascb->uldd_task = (void *) (unsigned long) dl->opcode;
80 complete(&ascb->completion);
81}
82
83static void asd_clear_nexus_timedout(unsigned long data)
84{
85 struct asd_ascb *ascb = (void *) data;
86
87 ASD_DPRINTK("%s: here\n", __FUNCTION__);
88 asd_timedout_common(data);
89 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
90 complete(&ascb->completion);
91}
92
93#define CLEAR_NEXUS_PRE \
94 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
95 res = 1; \
96 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
97 if (!ascb) \
98 return -ENOMEM; \
99 \
100 scb = ascb->scb; \
101 scb->header.opcode = CLEAR_NEXUS
102
103#define CLEAR_NEXUS_POST \
104 ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
105 res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
106 asd_clear_nexus_timedout); \
107 if (res) \
108 goto out_err; \
109 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
110 wait_for_completion(&ascb->completion); \
111 res = (int) (unsigned long) ascb->uldd_task; \
112 if (res == TC_NO_ERROR) \
113 res = TMF_RESP_FUNC_COMPLETE; \
114out_err: \
115 asd_ascb_free(ascb); \
116 return res
117
118int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
119{
120 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
121 struct asd_ascb *ascb;
122 struct scb *scb;
123 int res;
124
125 CLEAR_NEXUS_PRE;
126 scb->clear_nexus.nexus = NEXUS_ADAPTER;
127 CLEAR_NEXUS_POST;
128}
129
130int asd_clear_nexus_port(struct asd_sas_port *port)
131{
132 struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
133 struct asd_ascb *ascb;
134 struct scb *scb;
135 int res;
136
137 CLEAR_NEXUS_PRE;
138 scb->clear_nexus.nexus = NEXUS_PORT;
139 scb->clear_nexus.conn_mask = port->phy_mask;
140 CLEAR_NEXUS_POST;
141}
142
143#if 0
144static int asd_clear_nexus_I_T(struct domain_device *dev)
145{
146 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
147 struct asd_ascb *ascb;
148 struct scb *scb;
149 int res;
150
151 CLEAR_NEXUS_PRE;
152 scb->clear_nexus.nexus = NEXUS_I_T;
153 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
154 if (dev->tproto)
155 scb->clear_nexus.flags |= SUSPEND_TX;
156 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
157 dev->lldd_dev);
158 CLEAR_NEXUS_POST;
159}
160#endif
161
162static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
163{
164 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
165 struct asd_ascb *ascb;
166 struct scb *scb;
167 int res;
168
169 CLEAR_NEXUS_PRE;
170 scb->clear_nexus.nexus = NEXUS_I_T_L;
171 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
172 if (dev->tproto)
173 scb->clear_nexus.flags |= SUSPEND_TX;
174 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
176 dev->lldd_dev);
177 CLEAR_NEXUS_POST;
178}
179
180static int asd_clear_nexus_tag(struct sas_task *task)
181{
182 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
183 struct asd_ascb *tascb = task->lldd_task;
184 struct asd_ascb *ascb;
185 struct scb *scb;
186 int res;
187
188 CLEAR_NEXUS_PRE;
189 scb->clear_nexus.nexus = NEXUS_TAG;
190 memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
191 scb->clear_nexus.ssp_task.tag = tascb->tag;
192 if (task->dev->tproto)
193 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
194 task->dev->lldd_dev);
195 CLEAR_NEXUS_POST;
196}
197
198static int asd_clear_nexus_index(struct sas_task *task)
199{
200 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
201 struct asd_ascb *tascb = task->lldd_task;
202 struct asd_ascb *ascb;
203 struct scb *scb;
204 int res;
205
206 CLEAR_NEXUS_PRE;
207 scb->clear_nexus.nexus = NEXUS_TRANS_CX;
208 if (task->dev->tproto)
209 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
210 task->dev->lldd_dev);
211 scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
212 CLEAR_NEXUS_POST;
213}
214
215/* ---------- TMFs ---------- */
216
217static void asd_tmf_timedout(unsigned long data)
218{
219 struct asd_ascb *ascb = (void *) data;
220
221 ASD_DPRINTK("tmf timed out\n");
222 asd_timedout_common(data);
223 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
224 complete(&ascb->completion);
225}
226
227static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
228 struct done_list_struct *dl)
229{
230 struct asd_ha_struct *asd_ha = ascb->ha;
231 unsigned long flags;
232 struct tc_resp_sb_struct {
233 __le16 index_escb;
234 u8 len_lsb;
235 u8 flags;
236 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
237
238 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
239 struct asd_ascb *escb;
240 struct asd_dma_tok *edb;
241 struct ssp_frame_hdr *fh;
242 struct ssp_response_iu *ru;
243 int res = TMF_RESP_FUNC_FAILED;
244
245 ASD_DPRINTK("tmf resp tasklet\n");
246
247 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
248 escb = asd_tc_index_find(&asd_ha->seq,
249 (int)le16_to_cpu(resp_sb->index_escb));
250 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
251
252 if (!escb) {
253 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
254 return res;
255 }
256
257 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
258 ascb->tag = *(__be16 *)(edb->vaddr+4);
259 fh = edb->vaddr + 16;
260 ru = edb->vaddr + 16 + sizeof(*fh);
261 res = ru->status;
262 if (ru->datapres == 1) /* Response data present */
263 res = ru->resp_data[3];
264#if 0
265 ascb->tag = fh->tag;
266#endif
267 ascb->tag_valid = 1;
268
269 asd_invalidate_edb(escb, edb_id);
270 return res;
271}
272
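/*
 * For anything other than an SSP response frame, the firmware done-list
 * opcode is handed back biased by 0xFF00, so callers (see the switch
 * statements in asd_abort_task() and asd_initiate_ssp_tmf() below) can
 * tell firmware task-completion codes apart from SAS TMF response codes.
 */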
273static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
274 struct done_list_struct *dl)
275{
276 if (!del_timer(&ascb->timer))
277 return;
278
279 ASD_DPRINTK("tmf tasklet complete\n");
280
281 if (dl->opcode == TC_SSP_RESP)
282 ascb->uldd_task = (void *) (unsigned long)
283 asd_get_tmf_resp_tasklet(ascb, dl);
284 else
285		ascb->uldd_task = (void *) (0xFF00 + (unsigned long) dl->opcode);
286
287 complete(&ascb->completion);
288}
289
290static inline int asd_clear_nexus(struct sas_task *task)
291{
292 int res = TMF_RESP_FUNC_FAILED;
293 struct asd_ascb *tascb = task->lldd_task;
294 unsigned long flags;
295
296 ASD_DPRINTK("task not done, clearing nexus\n");
297 if (tascb->tag_valid)
298 res = asd_clear_nexus_tag(task);
299 else
300 res = asd_clear_nexus_index(task);
301 wait_for_completion_timeout(&tascb->completion,
302 AIC94XX_SCB_TIMEOUT);
303 ASD_DPRINTK("came back from clear nexus\n");
304 spin_lock_irqsave(&task->task_state_lock, flags);
305 if (task->task_state_flags & SAS_TASK_STATE_DONE)
306 res = TMF_RESP_FUNC_COMPLETE;
307 spin_unlock_irqrestore(&task->task_state_lock, flags);
308
309 return res;
310}
311
312/**
313 * asd_abort_task -- ABORT TASK TMF
314 * @task: the task to be aborted
315 *
316 * Before calling ABORT TASK the task state flags should be ORed with
317 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
318 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
319 *
320 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
321 * Returns: SAS TMF responses (see sas_task.h),
322 * -ENOMEM,
323 * -SAS_QUEUE_FULL.
324 *
325 * When ABORT TASK returns, the caller of ABORT TASK checks first the
326 * task->task_state_flags, and then the return value of ABORT TASK.
327 *
328 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
329 * task was completed successfully prior to it being aborted. The
330 * caller of ABORT TASK has responsibility to call task->task_done()
331 * xor free the task, depending on their framework. The return code
332 * is TMF_RESP_FUNC_FAILED in this case.
333 *
334 * Else the SAS_TASK_STATE_DONE bit is not set,
335 * If the return code is TMF_RESP_FUNC_COMPLETE, then
336 * the task was aborted successfully. The caller of
337 * ABORT TASK has responsibility to call task->task_done()
338 * to finish the task, xor free the task depending on their
339 * framework.
340 * else
341 * the ABORT TASK returned some kind of error. The task
342 * was _not_ cancelled. Nothing can be assumed.
343 * The caller of ABORT TASK may wish to retry.
344 */
345int asd_abort_task(struct sas_task *task)
346{
347 struct asd_ascb *tascb = task->lldd_task;
348 struct asd_ha_struct *asd_ha = tascb->ha;
349 int res = 1;
350 unsigned long flags;
351 struct asd_ascb *ascb = NULL;
352 struct scb *scb;
353
354 spin_lock_irqsave(&task->task_state_lock, flags);
355 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
356 spin_unlock_irqrestore(&task->task_state_lock, flags);
357 res = TMF_RESP_FUNC_COMPLETE;
358 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
359 goto out_done;
360 }
361 spin_unlock_irqrestore(&task->task_state_lock, flags);
362
363 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
364 if (!ascb)
365 return -ENOMEM;
366 scb = ascb->scb;
367
368 scb->header.opcode = ABORT_TASK;
369
370 switch (task->task_proto) {
371 case SATA_PROTO:
372 case SAS_PROTO_STP:
373 scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
374 break;
375 case SAS_PROTO_SSP:
376 scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
377 scb->abort_task.proto_conn_rate |= task->dev->linkrate;
378 break;
379 case SAS_PROTO_SMP:
380 break;
381 default:
382 break;
383 }
384
385 if (task->task_proto == SAS_PROTO_SSP) {
386 scb->abort_task.ssp_frame.frame_type = SSP_TASK;
387 memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
388 task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
389 memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
390 task->dev->port->ha->hashed_sas_addr,
391 HASHED_SAS_ADDR_SIZE);
392 scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
393
394 memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
395 scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
396 scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
397 }
398
399 scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
400 scb->abort_task.conn_handle = cpu_to_le16(
401 (u16)(unsigned long)task->dev->lldd_dev);
402 scb->abort_task.retry_count = 1;
403 scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
404 scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
405
406 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
407 asd_tmf_timedout);
408 if (res)
409 goto out;
410 wait_for_completion(&ascb->completion);
411 ASD_DPRINTK("tmf came back\n");
412
413 res = (int) (unsigned long) ascb->uldd_task;
414 tascb->tag = ascb->tag;
415 tascb->tag_valid = ascb->tag_valid;
416
417 spin_lock_irqsave(&task->task_state_lock, flags);
418 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
419 spin_unlock_irqrestore(&task->task_state_lock, flags);
420 res = TMF_RESP_FUNC_COMPLETE;
421 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
422 goto out_done;
423 }
424 spin_unlock_irqrestore(&task->task_state_lock, flags);
425
426 switch (res) {
427 /* The task to be aborted has been sent to the device.
428 * We got a Response IU for the ABORT TASK TMF. */
429 case TC_NO_ERROR + 0xFF00:
430 case TMF_RESP_FUNC_COMPLETE:
431 case TMF_RESP_FUNC_FAILED:
432 res = asd_clear_nexus(task);
433 break;
434 case TMF_RESP_INVALID_FRAME:
435 case TMF_RESP_OVERLAPPED_TAG:
436 case TMF_RESP_FUNC_ESUPP:
437 case TMF_RESP_NO_LUN:
438		goto out_done;
439 }
440 /* In the following we assume that the managing layer
441 * will _never_ make a mistake, when issuing ABORT TASK.
442 */
443 switch (res) {
444 default:
445 res = asd_clear_nexus(task);
446 /* fallthrough */
447 case TC_NO_ERROR + 0xFF00:
448 case TMF_RESP_FUNC_COMPLETE:
449 break;
450 /* The task hasn't been sent to the device xor we never got
451 * a (sane) Response IU for the ABORT TASK TMF.
452 */
453 case TF_NAK_RECV + 0xFF00:
454 res = TMF_RESP_INVALID_FRAME;
455 break;
456 case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
457 res = TMF_RESP_FUNC_FAILED;
458 wait_for_completion_timeout(&tascb->completion,
459 AIC94XX_SCB_TIMEOUT);
460 spin_lock_irqsave(&task->task_state_lock, flags);
461 if (task->task_state_flags & SAS_TASK_STATE_DONE)
462 res = TMF_RESP_FUNC_COMPLETE;
463 spin_unlock_irqrestore(&task->task_state_lock, flags);
464 goto out_done;
465 case TF_TMF_NO_TAG + 0xFF00:
466 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
467 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
468 res = TMF_RESP_FUNC_COMPLETE;
469 goto out_done;
470 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
471 res = TMF_RESP_FUNC_ESUPP;
472 goto out;
473 }
474out_done:
475 if (res == TMF_RESP_FUNC_COMPLETE) {
476 task->lldd_task = NULL;
477 mb();
478 asd_ascb_free(tascb);
479 }
480out:
481 asd_ascb_free(ascb);
482 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
483 return res;
484}
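/*
 * A minimal caller sketch of the protocol documented above (hypothetical
 * error-handling code, not part of this driver):
 *
 *	spin_lock_irqsave(&task->task_state_lock, flags);
 *	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 *		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 *	spin_unlock_irqrestore(&task->task_state_lock, flags);
 *	res = asd_abort_task(task);
 *
 * On return, check task->task_state_flags for SAS_TASK_STATE_DONE first
 * and only then interpret res, as described in the kernel-doc above.
 */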
485
486/**
487 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
488 * @dev: pointer to struct domain_device of interest
489 * @lun: pointer to u8[8] which is the LUN
490 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
491 * @index: the transaction context of the task to be queried if QT TMF
492 *
493 * This function is used to send ABORT TASK SET, CLEAR ACA,
494 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
495 *
496 * No SCBs should be queued to the I_T_L nexus when this SCB is
497 * pending.
498 *
499 * Returns: TMF response code (see sas_task.h or the SAS spec)
500 */
501static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
502 int tmf, int index)
503{
504 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
505 struct asd_ascb *ascb;
506 int res = 1;
507 struct scb *scb;
508
509 if (!(dev->tproto & SAS_PROTO_SSP))
510 return TMF_RESP_FUNC_ESUPP;
511
512 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
513 if (!ascb)
514 return -ENOMEM;
515 scb = ascb->scb;
516
517 if (tmf == TMF_QUERY_TASK)
518 scb->header.opcode = QUERY_SSP_TASK;
519 else
520 scb->header.opcode = INITIATE_SSP_TMF;
521
522 scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
523 scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
524 /* SSP frame header */
525 scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
526 memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
527 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
528 memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
529 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
530 scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
531 /* SSP Task IU */
532 memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
533 scb->ssp_tmf.ssp_task.tmf = tmf;
534
535 scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
536	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
537 dev->lldd_dev);
538 scb->ssp_tmf.retry_count = 1;
539 scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
540 if (tmf == TMF_QUERY_TASK)
541 scb->ssp_tmf.index = cpu_to_le16(index);
542
543 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
544 asd_tmf_timedout);
545 if (res)
546 goto out_err;
547 wait_for_completion(&ascb->completion);
548 res = (int) (unsigned long) ascb->uldd_task;
549
550 switch (res) {
551 case TC_NO_ERROR + 0xFF00:
552 res = TMF_RESP_FUNC_COMPLETE;
553 break;
554 case TF_NAK_RECV + 0xFF00:
555 res = TMF_RESP_INVALID_FRAME;
556 break;
557 case TF_TMF_TASK_DONE + 0xFF00:
558 res = TMF_RESP_FUNC_FAILED;
559 break;
560 case TF_TMF_NO_TAG + 0xFF00:
561 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
562 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
563 res = TMF_RESP_FUNC_COMPLETE;
564 break;
565 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
566 res = TMF_RESP_FUNC_ESUPP;
567 break;
568 default:
569 ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n",
570 __FUNCTION__, res);
571 res = TMF_RESP_FUNC_FAILED;
572 break;
573 }
574out_err:
575 asd_ascb_free(ascb);
576 return res;
577}
578
579int asd_abort_task_set(struct domain_device *dev, u8 *lun)
580{
581 int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
582
583 if (res == TMF_RESP_FUNC_COMPLETE)
584 asd_clear_nexus_I_T_L(dev, lun);
585 return res;
586}
587
588int asd_clear_aca(struct domain_device *dev, u8 *lun)
589{
590 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
591
592 if (res == TMF_RESP_FUNC_COMPLETE)
593 asd_clear_nexus_I_T_L(dev, lun);
594 return res;
595}
596
597int asd_clear_task_set(struct domain_device *dev, u8 *lun)
598{
599 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
600
601 if (res == TMF_RESP_FUNC_COMPLETE)
602 asd_clear_nexus_I_T_L(dev, lun);
603 return res;
604}
605
606int asd_lu_reset(struct domain_device *dev, u8 *lun)
607{
608 int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
609
610 if (res == TMF_RESP_FUNC_COMPLETE)
611 asd_clear_nexus_I_T_L(dev, lun);
612 return res;
613}
614
615/**
616 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
617 * @task: pointer to sas_task struct of interest
618 *
619 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
620 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
621 *
622 * Normally the management layer sets the task to aborted state,
623 * and then calls query task and then abort task.
624 */
625int asd_query_task(struct sas_task *task)
626{
627 struct asd_ascb *ascb = task->lldd_task;
628 int index;
629
630 if (ascb) {
631 index = ascb->tc_index;
632 return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
633 TMF_QUERY_TASK, index);
634 }
635 return TMF_RESP_FUNC_COMPLETE;
636}
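/*
 * These entry points are exported to libsas through the driver's
 * struct sas_domain_function_template (see aic94xx_init.c); roughly:
 *
 *	.lldd_abort_task	= asd_abort_task,
 *	.lldd_abort_task_set	= asd_abort_task_set,
 *	.lldd_clear_aca		= asd_clear_aca,
 *	.lldd_clear_task_set	= asd_clear_task_set,
 *	.lldd_lu_reset		= asd_lu_reset,
 *	.lldd_query_task	= asd_query_task,
 */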
diff --git a/drivers/scsi/amiga7xx.c b/drivers/scsi/amiga7xx.c
index c0844fa32c5d..9099d531d5a4 100644
--- a/drivers/scsi/amiga7xx.c
+++ b/drivers/scsi/amiga7xx.c
@@ -11,7 +11,6 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/blkdev.h> 12#include <linux/blkdev.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/config.h>
15#include <linux/zorro.h> 14#include <linux/zorro.h>
16#include <linux/stat.h> 15#include <linux/stat.h>
17 16
diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile
new file mode 100644
index 000000000000..721aced39168
--- /dev/null
+++ b/drivers/scsi/arcmsr/Makefile
@@ -0,0 +1,6 @@
1# File: drivers/scsi/arcmsr/Makefile
2# Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver.
3
4arcmsr-objs := arcmsr_attr.o arcmsr_hba.o
5
6obj-$(CONFIG_SCSI_ARCMSR) := arcmsr.o
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
new file mode 100644
index 000000000000..aff96db9ccf6
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -0,0 +1,472 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr.h
5** BY : Erich Chen
6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter
8*******************************************************************************
9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved.
10**
11** Web site: www.areca.com.tw
12** E-mail: erich@areca.com.tw
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License version 2 as
16** published by the Free Software Foundation.
17** This program is distributed in the hope that it will be useful,
18** but WITHOUT ANY WARRANTY; without even the implied warranty of
19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20** GNU General Public License for more details.
21*******************************************************************************
22** Redistribution and use in source and binary forms, with or without
23** modification, are permitted provided that the following conditions
24** are met:
25** 1. Redistributions of source code must retain the above copyright
26** notice, this list of conditions and the following disclaimer.
27** 2. Redistributions in binary form must reproduce the above copyright
28** notice, this list of conditions and the following disclaimer in the
29** documentation and/or other materials provided with the distribution.
30** 3. The name of the author may not be used to endorse or promote products
31** derived from this software without specific prior written permission.
32**
33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41**(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43*******************************************************************************
44*/
45#include <linux/interrupt.h>
46
47struct class_device_attribute;
48
49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 288
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13"
52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_TARGETID 17
55#define ARCMSR_MAX_TARGETLUN 8
56#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
57#define ARCMSR_MAX_QBUFFER 4096
58#define ARCMSR_MAX_SG_ENTRIES 38
59
60/*
61*******************************************************************************
62** split 64bits dma addressing
63*******************************************************************************
64*/
65#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
66#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
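/*
 * Usage sketch (illustrative only): filling a struct SG64ENTRY, defined
 * below, from a dma_addr_t returned by the DMA mapping API:
 *
 *	entry->address     = dma_addr_lo32(busaddr);
 *	entry->addresshigh = dma_addr_hi32(busaddr);
 */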
67/*
68*******************************************************************************
69** MESSAGE CONTROL CODE
70*******************************************************************************
71*/
72struct CMD_MESSAGE
73{
74 uint32_t HeaderLength;
75 uint8_t Signature[8];
76 uint32_t Timeout;
77 uint32_t ControlCode;
78 uint32_t ReturnCode;
79 uint32_t Length;
80};
81/*
82*******************************************************************************
83** IOP Message Transfer Data for user space
84*******************************************************************************
85*/
86struct CMD_MESSAGE_FIELD
87{
88 struct CMD_MESSAGE cmdmessage;
89 uint8_t messagedatabuffer[1032];
90};
91/* IOP message transfer */
92#define ARCMSR_MESSAGE_FAIL 0x0001
93/* DeviceType */
94#define ARECA_SATA_RAID 0x90000000
95/* FunctionCode */
96#define FUNCTION_READ_RQBUFFER 0x0801
97#define FUNCTION_WRITE_WQBUFFER 0x0802
98#define FUNCTION_CLEAR_RQBUFFER 0x0803
99#define FUNCTION_CLEAR_WQBUFFER 0x0804
100#define FUNCTION_CLEAR_ALLQBUFFER 0x0805
101#define FUNCTION_RETURN_CODE_3F 0x0806
102#define FUNCTION_SAY_HELLO 0x0807
103#define FUNCTION_SAY_GOODBYE 0x0808
104#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
105/* ARECA IO CONTROL CODE*/
106#define ARCMSR_MESSAGE_READ_RQBUFFER \
107 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
108#define ARCMSR_MESSAGE_WRITE_WQBUFFER \
109 ARECA_SATA_RAID | FUNCTION_WRITE_WQBUFFER
110#define ARCMSR_MESSAGE_CLEAR_RQBUFFER \
111 ARECA_SATA_RAID | FUNCTION_CLEAR_RQBUFFER
112#define ARCMSR_MESSAGE_CLEAR_WQBUFFER \
113 ARECA_SATA_RAID | FUNCTION_CLEAR_WQBUFFER
114#define ARCMSR_MESSAGE_CLEAR_ALLQBUFFER \
115 ARECA_SATA_RAID | FUNCTION_CLEAR_ALLQBUFFER
116#define ARCMSR_MESSAGE_RETURN_CODE_3F \
117 ARECA_SATA_RAID | FUNCTION_RETURN_CODE_3F
118#define ARCMSR_MESSAGE_SAY_HELLO \
119 ARECA_SATA_RAID | FUNCTION_SAY_HELLO
120#define ARCMSR_MESSAGE_SAY_GOODBYE \
121 ARECA_SATA_RAID | FUNCTION_SAY_GOODBYE
122#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \
123 ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE
124/* ARECA IOCTL ReturnCode */
125#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
126#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
127#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
128/*
129*************************************************************
130** structure for holding DMA address data
131*************************************************************
132*/
133#define IS_SG64_ADDR 0x01000000 /* bit24 */
134struct SG32ENTRY
135{
136 uint32_t length;
137 uint32_t address;
138};
139struct SG64ENTRY
140{
141 uint32_t length;
142 uint32_t address;
143 uint32_t addresshigh;
144};
145struct SGENTRY_UNION
146{
147 union
148 {
149 struct SG32ENTRY sg32entry;
150 struct SG64ENTRY sg64entry;
151 }u;
152};
153/*
154********************************************************************
155** Q Buffer of IOP Message Transfer
156********************************************************************
157*/
158struct QBUFFER
159{
160 uint32_t data_len;
161 uint8_t data[124];
162};
163/*
164*******************************************************************************
165** FIRMWARE INFO
166*******************************************************************************
167*/
168struct FIRMWARE_INFO
169{
170 uint32_t signature; /*0, 00-03*/
171 uint32_t request_len; /*1, 04-07*/
172 uint32_t numbers_queue; /*2, 08-11*/
173 uint32_t sdram_size; /*3, 12-15*/
174 uint32_t ide_channels; /*4, 16-19*/
175 char vendor[40]; /*5, 20-59*/
176 char model[8]; /*15, 60-67*/
177 char firmware_ver[16]; /*17, 68-83*/
178 char device_map[16]; /*21, 84-99*/
179};
180/* signature of set and get firmware config */
181#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
182#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
183/* message code of inbound message register */
184#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
185#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
186#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
187#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
188#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
189#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
190#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
191#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
192#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
193/* doorbell interrupt generator */
194#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
195#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
196#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
197#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
198/* ccb areca cdb flag */
199#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
200#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
201#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
202#define ARCMSR_CCBREPLY_FLAG_ERROR 0x10000000
203/* outbound firmware ok */
204#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
205/*
206*******************************************************************************
207** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
208*******************************************************************************
209*/
210struct ARCMSR_CDB
211{
212 uint8_t Bus;
213 uint8_t TargetID;
214 uint8_t LUN;
215 uint8_t Function;
216
217 uint8_t CdbLength;
218 uint8_t sgcount;
219 uint8_t Flags;
220#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
221#define ARCMSR_CDB_FLAG_BIOS 0x02
222#define ARCMSR_CDB_FLAG_WRITE 0x04
223#define ARCMSR_CDB_FLAG_SIMPLEQ 0x00
224#define ARCMSR_CDB_FLAG_HEADQ 0x08
225#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
226 uint8_t Reserved1;
227
228 uint32_t Context;
229 uint32_t DataLength;
230
231 uint8_t Cdb[16];
232
233 uint8_t DeviceStatus;
234#define ARCMSR_DEV_CHECK_CONDITION 0x02
235#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
236#define ARCMSR_DEV_ABORTED 0xF1
237#define ARCMSR_DEV_INIT_FAIL 0xF2
238 uint8_t SenseData[15];
239
240 union
241 {
242 struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES];
243 struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES];
244 } u;
245};
246/*
247*******************************************************************************
248** Messaging Unit (MU) of the Intel R 80331 I/O processor (80331)
249*******************************************************************************
250*/
251struct MessageUnit
252{
253 uint32_t resrved0[4]; /*0000 000F*/
254 uint32_t inbound_msgaddr0; /*0010 0013*/
255 uint32_t inbound_msgaddr1; /*0014 0017*/
256 uint32_t outbound_msgaddr0; /*0018 001B*/
257 uint32_t outbound_msgaddr1; /*001C 001F*/
258 uint32_t inbound_doorbell; /*0020 0023*/
259 uint32_t inbound_intstatus; /*0024 0027*/
260 uint32_t inbound_intmask; /*0028 002B*/
261 uint32_t outbound_doorbell; /*002C 002F*/
262 uint32_t outbound_intstatus; /*0030 0033*/
263 uint32_t outbound_intmask; /*0034 0037*/
264 uint32_t reserved1[2]; /*0038 003F*/
265 uint32_t inbound_queueport; /*0040 0043*/
266 uint32_t outbound_queueport; /*0044 0047*/
267 uint32_t reserved2[2]; /*0048 004F*/
268 uint32_t reserved3[492]; /*0050 07FF 492*/
269 uint32_t reserved4[128]; /*0800 09FF 128*/
270 uint32_t message_rwbuffer[256]; /*0a00 0DFF 256*/
271 uint32_t message_wbuffer[32]; /*0E00 0E7F 32*/
272 uint32_t reserved5[32]; /*0E80 0EFF 32*/
273 uint32_t message_rbuffer[32]; /*0F00 0F7F 32*/
274 uint32_t reserved6[32]; /*0F80 0FFF 32*/
275};
276/*
277*******************************************************************************
278** Adapter Control Block
279*******************************************************************************
280*/
281struct AdapterControlBlock
282{
283 struct pci_dev * pdev;
284 struct Scsi_Host * host;
285 unsigned long vir2phy_offset;
286 /* Offset is used in making arc cdb physical to virtual calculations */
287 uint32_t outbound_int_enable;
288
289 struct MessageUnit __iomem * pmu;
290 /* message unit ATU inbound base address0 */
291
292 uint32_t acb_flags;
293#define ACB_F_SCSISTOPADAPTER 0x0001
294#define ACB_F_MSG_STOP_BGRB 0x0002
295 /* stop RAID background rebuild */
296#define ACB_F_MSG_START_BGRB 0x0004
297	/* start RAID background rebuild */
298#define ACB_F_IOPDATA_OVERFLOW 0x0008
299 /* iop message data rqbuffer overflow */
300#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
301 /* message clear wqbuffer */
302#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
303 /* message clear rqbuffer */
304#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
305#define ACB_F_BUS_RESET 0x0080
306#define ACB_F_IOP_INITED 0x0100
307 /* iop init */
308
309 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
310 /* used for memory free */
311 struct list_head ccb_free_list;
312 /* head of free ccb list */
313 atomic_t ccboutstandingcount;
314
315 void * dma_coherent;
316 /* dma_coherent used for memory free */
317 dma_addr_t dma_coherent_handle;
318 /* dma_coherent_handle used for memory free */
319
320 uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
321 /* data collection buffer for read from 80331 */
322 int32_t rqbuf_firstindex;
323 /* first of read buffer */
324 int32_t rqbuf_lastindex;
325 /* last of read buffer */
326 uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
327 /* data collection buffer for write to 80331 */
328 int32_t wqbuf_firstindex;
329 /* first of write buffer */
330 int32_t wqbuf_lastindex;
331 /* last of write buffer */
332 uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
333 /* id0 ..... id15, lun0...lun7 */
334#define ARECA_RAID_GONE 0x55
335#define ARECA_RAID_GOOD 0xaa
336 uint32_t num_resets;
337 uint32_t num_aborts;
338 uint32_t firm_request_len;
339 uint32_t firm_numbers_queue;
340 uint32_t firm_sdram_size;
341 uint32_t firm_hd_channels;
342 char firm_model[12];
343 char firm_version[20];
344};/* HW_DEVICE_EXTENSION */
345/*
346*******************************************************************************
347** Command Control Block
348** this CCB length must be 32 bytes boundary
349*******************************************************************************
350*/
351struct CommandControlBlock
352{
353 struct ARCMSR_CDB arcmsr_cdb;
354 /*
355 ** 0-503 (size of CDB=504):
356 ** arcmsr messenger scsi command descriptor size 504 bytes
357 */
358 uint32_t cdb_shifted_phyaddr;
359 /* 504-507 */
360 uint32_t reserved1;
361 /* 508-511 */
362#if BITS_PER_LONG == 64
363 /* ======================512+64 bytes======================== */
364 struct list_head list;
365 /* 512-527 16 bytes next/prev ptrs for ccb lists */
366 struct scsi_cmnd * pcmd;
367 /* 528-535 8 bytes pointer of linux scsi command */
368 struct AdapterControlBlock * acb;
369 /* 536-543 8 bytes pointer of acb */
370
371 uint16_t ccb_flags;
372 /* 544-545 */
373 #define CCB_FLAG_READ 0x0000
374 #define CCB_FLAG_WRITE 0x0001
375 #define CCB_FLAG_ERROR 0x0002
376 #define CCB_FLAG_FLUSHCACHE 0x0004
377 #define CCB_FLAG_MASTER_ABORTED 0x0008
378 uint16_t startdone;
379 /* 546-547 */
380 #define ARCMSR_CCB_DONE 0x0000
381 #define ARCMSR_CCB_START 0x55AA
382 #define ARCMSR_CCB_ABORTED 0xAA55
383 #define ARCMSR_CCB_ILLEGAL 0xFFFF
384 uint32_t reserved2[7];
385 /* 548-551 552-555 556-559 560-563 564-567 568-571 572-575 */
386#else
387 /* ======================512+32 bytes======================== */
388 struct list_head list;
389 /* 512-519 8 bytes next/prev ptrs for ccb lists */
390 struct scsi_cmnd * pcmd;
391 /* 520-523 4 bytes pointer of linux scsi command */
392 struct AdapterControlBlock * acb;
393 /* 524-527 4 bytes pointer of acb */
394
395 uint16_t ccb_flags;
396 /* 528-529 */
397 #define CCB_FLAG_READ 0x0000
398 #define CCB_FLAG_WRITE 0x0001
399 #define CCB_FLAG_ERROR 0x0002
400 #define CCB_FLAG_FLUSHCACHE 0x0004
401 #define CCB_FLAG_MASTER_ABORTED 0x0008
402 uint16_t startdone;
403 /* 530-531 */
404 #define ARCMSR_CCB_DONE 0x0000
405 #define ARCMSR_CCB_START 0x55AA
406 #define ARCMSR_CCB_ABORTED 0xAA55
407 #define ARCMSR_CCB_ILLEGAL 0xFFFF
408 uint32_t reserved2[3];
409 /* 532-535 536-539 540-543 */
410#endif
411 /* ========================================================== */
412};
413/*
414*******************************************************************************
415** ARECA SCSI sense data
416*******************************************************************************
417*/
418struct SENSE_DATA
419{
420 uint8_t ErrorCode:7;
421#define SCSI_SENSE_CURRENT_ERRORS 0x70
422#define SCSI_SENSE_DEFERRED_ERRORS 0x71
423 uint8_t Valid:1;
424 uint8_t SegmentNumber;
425 uint8_t SenseKey:4;
426 uint8_t Reserved:1;
427 uint8_t IncorrectLength:1;
428 uint8_t EndOfMedia:1;
429 uint8_t FileMark:1;
430 uint8_t Information[4];
431 uint8_t AdditionalSenseLength;
432 uint8_t CommandSpecificInformation[4];
433 uint8_t AdditionalSenseCode;
434 uint8_t AdditionalSenseCodeQualifier;
435 uint8_t FieldReplaceableUnitCode;
436 uint8_t SenseKeySpecific[3];
437};
438/*
439*******************************************************************************
440** Outbound Interrupt Status Register - OISR
441*******************************************************************************
442*/
443#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
444#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
445#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
446#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
447#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
448#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
449#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
450 (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
451 |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
452 |ARCMSR_MU_OUTBOUND_DOORBELL_INT \
453 |ARCMSR_MU_OUTBOUND_POSTQUEUE_INT \
454 |ARCMSR_MU_OUTBOUND_PCI_INT)
455/*
456*******************************************************************************
457** Outbound Interrupt Mask Register - OIMR
458*******************************************************************************
459*/
460#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
461#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
462#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
463#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
464#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
465#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
466#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
467
468extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
469extern struct class_device_attribute *arcmsr_host_attrs[];
470extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb);
471void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
472
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
new file mode 100644
index 000000000000..12497da5529d
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -0,0 +1,381 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr_attr.c
5** BY : Erich Chen
6** Description: attributes exported to sysfs and device host
7*******************************************************************************
8** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
9**
10** Web site: www.areca.com.tw
11** E-mail: erich@areca.com.tw
12**
13** This program is free software; you can redistribute it and/or modify
14** it under the terms of the GNU General Public License version 2 as
15** published by the Free Software Foundation.
16** This program is distributed in the hope that it will be useful,
17** but WITHOUT ANY WARRANTY; without even the implied warranty of
18** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19** GNU General Public License for more details.
20*******************************************************************************
21** Redistribution and use in source and binary forms, with or without
22** modification, are permitted provided that the following conditions
23** are met:
24** 1. Redistributions of source code must retain the above copyright
25** notice, this list of conditions and the following disclaimer.
26** 2. Redistributions in binary form must reproduce the above copyright
27** notice, this list of conditions and the following disclaimer in the
28** documentation and/or other materials provided with the distribution.
29** 3. The name of the author may not be used to endorse or promote products
30** derived from this software without specific prior written permission.
31**
32** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42*******************************************************************************
43** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
45*******************************************************************************
46*/
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/errno.h>
51#include <linux/delay.h>
52#include <linux/pci.h>
53
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport.h>
58#include "arcmsr.h"
59
60struct class_device_attribute *arcmsr_host_attrs[];
61
62static ssize_t
63arcmsr_sysfs_iop_message_read(struct kobject *kobj, char *buf, loff_t off,
64 size_t count)
65{
66	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
67 struct Scsi_Host *host = class_to_shost(cdev);
68 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
69 struct MessageUnit __iomem *reg = acb->pmu;
70	uint8_t *pQbuffer, *ptmpQbuffer;
71 int32_t allxfer_len = 0;
72
73 if (!capable(CAP_SYS_ADMIN))
74 return -EACCES;
75
76 /* do message unit read. */
77 ptmpQbuffer = (uint8_t *)buf;
78 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
79 && (allxfer_len < 1031)) {
80 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
81 memcpy(ptmpQbuffer, pQbuffer, 1);
82 acb->rqbuf_firstindex++;
83 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
84 ptmpQbuffer++;
85 allxfer_len++;
86 }
87 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
88 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
89 &reg->message_rbuffer;
90 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
91 int32_t iop_len;
92
93 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
94 iop_len = readl(&prbuffer->data_len);
95 while (iop_len > 0) {
96 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
97 acb->rqbuf_lastindex++;
98 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
99 iop_data++;
100 iop_len--;
101 }
102 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
103 &reg->inbound_doorbell);
104 }
105	return allxfer_len;
106}
107
108static ssize_t
109arcmsr_sysfs_iop_message_write(struct kobject *kobj, char *buf, loff_t off,
110 size_t count)
111{
112	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
113 struct Scsi_Host *host = class_to_shost(cdev);
114 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
115 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
116 uint8_t *pQbuffer, *ptmpuserbuffer;
117
118 if (!capable(CAP_SYS_ADMIN))
119 return -EACCES;
120 if (count > 1032)
121 return -EINVAL;
122 /* do message unit write. */
123 ptmpuserbuffer = (uint8_t *)buf;
124 user_len = (int32_t)count;
125 wqbuf_lastindex = acb->wqbuf_lastindex;
126 wqbuf_firstindex = acb->wqbuf_firstindex;
127 if (wqbuf_lastindex != wqbuf_firstindex) {
128 arcmsr_post_Qbuffer(acb);
129 return 0; /*need retry*/
130 } else {
131		my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
132			& (ARCMSR_MAX_QBUFFER - 1); /* free space in circular buffer */
133 if (my_empty_len >= user_len) {
134 while (user_len > 0) {
135 pQbuffer =
136 &acb->wqbuffer[acb->wqbuf_lastindex];
137 memcpy(pQbuffer, ptmpuserbuffer, 1);
138 acb->wqbuf_lastindex++;
139 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
140 ptmpuserbuffer++;
141 user_len--;
142 }
143 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
144 acb->acb_flags &=
145 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
146 arcmsr_post_Qbuffer(acb);
147 }
148 return count;
149 } else {
150 return 0; /*need retry*/
151 }
152 }
153}
154
155static ssize_t
156arcmsr_sysfs_iop_message_clear(struct kobject *kobj, char *buf, loff_t off,
157 size_t count)
158{
159	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
160 struct Scsi_Host *host = class_to_shost(cdev);
161 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
162 struct MessageUnit __iomem *reg = acb->pmu;
163 uint8_t *pQbuffer;
164
165 if (!capable(CAP_SYS_ADMIN))
166 return -EACCES;
167
168 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
169 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
170 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
171 , &reg->inbound_doorbell);
172 }
173 acb->acb_flags |=
174 (ACB_F_MESSAGE_WQBUFFER_CLEARED
175 | ACB_F_MESSAGE_RQBUFFER_CLEARED
176 | ACB_F_MESSAGE_WQBUFFER_READED);
177 acb->rqbuf_firstindex = 0;
178 acb->rqbuf_lastindex = 0;
179 acb->wqbuf_firstindex = 0;
180 acb->wqbuf_lastindex = 0;
181 pQbuffer = acb->rqbuffer;
182 memset(pQbuffer, 0, sizeof (struct QBUFFER));
183 pQbuffer = acb->wqbuffer;
184 memset(pQbuffer, 0, sizeof (struct QBUFFER));
185 return 1;
186}
187
188static struct bin_attribute arcmsr_sysfs_message_read_attr = {
189 .attr = {
190 .name = "mu_read",
191 .mode = S_IRUSR ,
192 .owner = THIS_MODULE,
193 },
194 .size = 1032,
195 .read = arcmsr_sysfs_iop_message_read,
196};
197
198static struct bin_attribute arcmsr_sysfs_message_write_attr = {
199 .attr = {
200 .name = "mu_write",
201 .mode = S_IWUSR,
202 .owner = THIS_MODULE,
203 },
204 .size = 1032,
205 .write = arcmsr_sysfs_iop_message_write,
206};
207
208static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
209 .attr = {
210 .name = "mu_clear",
211 .mode = S_IWUSR,
212 .owner = THIS_MODULE,
213 },
214 .size = 1,
215 .write = arcmsr_sysfs_iop_message_clear,
216};
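/*
 * From user space these binary attributes appear under the SCSI host
 * class device, e.g. /sys/class/scsi_host/host<N>/mu_read.  A
 * hypothetical reader:
 *
 *	int fd = open("/sys/class/scsi_host/host0/mu_read", O_RDONLY);
 *	char buf[1032];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */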
217
218int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
219{
220 struct Scsi_Host *host = acb->host;
221 int error;
222
223 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
224 &arcmsr_sysfs_message_read_attr);
225 if (error) {
226 printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
227 goto error_bin_file_message_read;
228 }
229 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
230 &arcmsr_sysfs_message_write_attr);
231 if (error) {
232 printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
233 goto error_bin_file_message_write;
234 }
235 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
236 &arcmsr_sysfs_message_clear_attr);
237 if (error) {
238 printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
239 goto error_bin_file_message_clear;
240 }
241 return 0;
242error_bin_file_message_clear:
243 sysfs_remove_bin_file(&host->shost_classdev.kobj,
244 &arcmsr_sysfs_message_write_attr);
245error_bin_file_message_write:
246 sysfs_remove_bin_file(&host->shost_classdev.kobj,
247 &arcmsr_sysfs_message_read_attr);
248error_bin_file_message_read:
249 return error;
250}
251
252void
253arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) {
254 struct Scsi_Host *host = acb->host;
255
256 sysfs_remove_bin_file(&host->shost_classdev.kobj,
257 &arcmsr_sysfs_message_clear_attr);
258 sysfs_remove_bin_file(&host->shost_classdev.kobj,
259 &arcmsr_sysfs_message_write_attr);
260 sysfs_remove_bin_file(&host->shost_classdev.kobj,
261 &arcmsr_sysfs_message_read_attr);
262}
263
264
265static ssize_t
266arcmsr_attr_host_driver_version(struct class_device *cdev, char *buf) {
267 return snprintf(buf, PAGE_SIZE,
268 "%s\n",
269 ARCMSR_DRIVER_VERSION);
270}
271
272static ssize_t
273arcmsr_attr_host_driver_posted_cmd(struct class_device *cdev, char *buf) {
274 struct Scsi_Host *host = class_to_shost(cdev);
275 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
276 return snprintf(buf, PAGE_SIZE,
277 "%4d\n",
278 atomic_read(&acb->ccboutstandingcount));
279}
280
281static ssize_t
282arcmsr_attr_host_driver_reset(struct class_device *cdev, char *buf) {
283 struct Scsi_Host *host = class_to_shost(cdev);
284 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
285 return snprintf(buf, PAGE_SIZE,
286 "%4d\n",
287 acb->num_resets);
288}
289
290static ssize_t
291arcmsr_attr_host_driver_abort(struct class_device *cdev, char *buf) {
292 struct Scsi_Host *host = class_to_shost(cdev);
293 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
294 return snprintf(buf, PAGE_SIZE,
295 "%4d\n",
296 acb->num_aborts);
297}
298
299static ssize_t
300arcmsr_attr_host_fw_model(struct class_device *cdev, char *buf) {
301 struct Scsi_Host *host = class_to_shost(cdev);
302 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
303 return snprintf(buf, PAGE_SIZE,
304 "%s\n",
305 acb->firm_model);
306}
307
308static ssize_t
309arcmsr_attr_host_fw_version(struct class_device *cdev, char *buf) {
310 struct Scsi_Host *host = class_to_shost(cdev);
311 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
312
313 return snprintf(buf, PAGE_SIZE,
314 "%s\n",
315 acb->firm_version);
316}
317
318static ssize_t
319arcmsr_attr_host_fw_request_len(struct class_device *cdev, char *buf) {
320 struct Scsi_Host *host = class_to_shost(cdev);
321 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
322
323 return snprintf(buf, PAGE_SIZE,
324 "%4d\n",
325 acb->firm_request_len);
326}
327
328static ssize_t
329arcmsr_attr_host_fw_numbers_queue(struct class_device *cdev, char *buf) {
330 struct Scsi_Host *host = class_to_shost(cdev);
331 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
332
333 return snprintf(buf, PAGE_SIZE,
334 "%4d\n",
335 acb->firm_numbers_queue);
336}
337
338static ssize_t
339arcmsr_attr_host_fw_sdram_size(struct class_device *cdev, char *buf) {
340 struct Scsi_Host *host = class_to_shost(cdev);
341 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
342
343 return snprintf(buf, PAGE_SIZE,
344 "%4d\n",
345 acb->firm_sdram_size);
346}
347
348static ssize_t
349arcmsr_attr_host_fw_hd_channels(struct class_device *cdev, char *buf) {
350 struct Scsi_Host *host = class_to_shost(cdev);
351 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
352
353 return snprintf(buf, PAGE_SIZE,
354 "%4d\n",
355 acb->firm_hd_channels);
356}
357
358static CLASS_DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
359static CLASS_DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
360static CLASS_DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
361static CLASS_DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
362static CLASS_DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
363static CLASS_DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
364static CLASS_DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
365static CLASS_DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
366static CLASS_DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
367static CLASS_DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
368
369struct class_device_attribute *arcmsr_host_attrs[] = {
370 &class_device_attr_host_driver_version,
371 &class_device_attr_host_driver_posted_cmd,
372 &class_device_attr_host_driver_reset,
373 &class_device_attr_host_driver_abort,
374 &class_device_attr_host_fw_model,
375 &class_device_attr_host_fw_version,
376 &class_device_attr_host_fw_request_len,
377 &class_device_attr_host_fw_numbers_queue,
378 &class_device_attr_host_fw_sdram_size,
379 &class_device_attr_host_fw_hd_channels,
380 NULL,
381};
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
new file mode 100644
index 000000000000..475f978ff8f0
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -0,0 +1,1496 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr_hba.c
5** BY : Erich Chen
6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter
8*******************************************************************************
9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
10**
11** Web site: www.areca.com.tw
12** E-mail: erich@areca.com.tw
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License version 2 as
16** published by the Free Software Foundation.
17** This program is distributed in the hope that it will be useful,
18** but WITHOUT ANY WARRANTY; without even the implied warranty of
19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20** GNU General Public License for more details.
21*******************************************************************************
22** Redistribution and use in source and binary forms, with or without
23** modification, are permitted provided that the following conditions
24** are met:
25** 1. Redistributions of source code must retain the above copyright
26** notice, this list of conditions and the following disclaimer.
27** 2. Redistributions in binary form must reproduce the above copyright
28** notice, this list of conditions and the following disclaimer in the
29** documentation and/or other materials provided with the distribution.
30** 3. The name of the author may not be used to endorse or promote products
31** derived from this software without specific prior written permission.
32**
33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43*******************************************************************************
44** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46*******************************************************************************
47*/
48#include <linux/module.h>
49#include <linux/reboot.h>
50#include <linux/spinlock.h>
51#include <linux/pci_ids.h>
52#include <linux/interrupt.h>
53#include <linux/moduleparam.h>
54#include <linux/errno.h>
55#include <linux/types.h>
56#include <linux/delay.h>
57#include <linux/dma-mapping.h>
58#include <linux/timer.h>
59#include <linux/pci.h>
60#include <asm/dma.h>
61#include <asm/io.h>
62#include <asm/system.h>
63#include <asm/uaccess.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi.h>
66#include <scsi/scsi_cmnd.h>
67#include <scsi/scsi_tcq.h>
68#include <scsi/scsi_device.h>
69#include <scsi/scsi_transport.h>
70#include <scsi/scsicam.h>
71#include "arcmsr.h"
72
73MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
74MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
75MODULE_LICENSE("Dual BSD/GPL");
76MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
78static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
79static int arcmsr_abort(struct scsi_cmnd *);
80static int arcmsr_bus_reset(struct scsi_cmnd *);
81static int arcmsr_bios_param(struct scsi_device *sdev,
82 struct block_device *bdev, sector_t capacity, int *info);
83static int arcmsr_queue_command(struct scsi_cmnd * cmd,
84 void (*done) (struct scsi_cmnd *));
85static int arcmsr_probe(struct pci_dev *pdev,
86 const struct pci_device_id *id);
87static void arcmsr_remove(struct pci_dev *pdev);
88static void arcmsr_shutdown(struct pci_dev *pdev);
89static void arcmsr_iop_init(struct AdapterControlBlock *acb);
90static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
91static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
92static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
93static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
94static const char *arcmsr_info(struct Scsi_Host *);
95static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
96
97static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
98{
99 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
100 queue_depth = ARCMSR_MAX_CMD_PERLUN;
101 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
102 return queue_depth;
103}
104
105static struct scsi_host_template arcmsr_scsi_host_template = {
106 .module = THIS_MODULE,
107	.name			= "ARCMSR ARECA SATA RAID HOST Adapter " ARCMSR_DRIVER_VERSION,
108 .info = arcmsr_info,
109 .queuecommand = arcmsr_queue_command,
110 .eh_abort_handler = arcmsr_abort,
111 .eh_bus_reset_handler = arcmsr_bus_reset,
112 .bios_param = arcmsr_bios_param,
113 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
114 .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
115 .this_id = ARCMSR_SCSI_INITIATOR_ID,
116 .sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
117 .max_sectors = ARCMSR_MAX_XFER_SECTORS,
118 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
119 .use_clustering = ENABLE_CLUSTERING,
120 .shost_attrs = arcmsr_host_attrs,
121};
122
123static struct pci_device_id arcmsr_device_id_table[] = {
124 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
125 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
126 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
127 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
128 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
129 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
130 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
131 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
132 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
133 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
134 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
135 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
136 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
137 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
138 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
139 {0, 0}, /* Terminating entry */
140};
141MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
142static struct pci_driver arcmsr_pci_driver = {
143 .name = "arcmsr",
144 .id_table = arcmsr_device_id_table,
145 .probe = arcmsr_probe,
146 .remove = arcmsr_remove,
147 .shutdown = arcmsr_shutdown
148};
149
150static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id,
151 struct pt_regs *regs)
152{
153 irqreturn_t handle_state;
154 struct AdapterControlBlock *acb;
155 unsigned long flags;
156
157 acb = (struct AdapterControlBlock *)dev_id;
158
159 spin_lock_irqsave(acb->host->host_lock, flags);
160 handle_state = arcmsr_interrupt(acb);
161 spin_unlock_irqrestore(acb->host->host_lock, flags);
162 return handle_state;
163}
164
165static int arcmsr_bios_param(struct scsi_device *sdev,
166 struct block_device *bdev, sector_t capacity, int *geom)
167{
168 int ret, heads, sectors, cylinders, total_capacity;
169 unsigned char *buffer;/* return copy of block device's partition table */
170
171 buffer = scsi_bios_ptable(bdev);
172 if (buffer) {
173 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
174 kfree(buffer);
175 if (ret != -1)
176 return ret;
177 }
178 total_capacity = capacity;
179 heads = 64;
180 sectors = 32;
181 cylinders = total_capacity / (heads * sectors);
182 if (cylinders > 1024) {
183 heads = 255;
184 sectors = 63;
185 cylinders = total_capacity / (heads * sectors);
186 }
187 geom[0] = heads;
188 geom[1] = sectors;
189 geom[2] = cylinders;
190 return 0;
191}
192
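[Editor's note: a minimal, self-contained sketch of the geometry fallback used above when scsi_partsize() cannot recover CHS values from a partition table; arcmsr_bios_param() starts from 64 heads / 32 sectors and widens to 255/63 once the cylinder count would exceed the 1024-cylinder limit of legacy CHS addressing. Illustration only, not driver code.]

static void chs_fallback_sketch(unsigned long capacity, int geom[3])
{
	int heads = 64, sectors = 32;
	int cylinders = capacity / (heads * sectors);

	if (cylinders > 1024) {	/* legacy BIOS CHS limit */
		heads = 255;
		sectors = 63;
		cylinders = capacity / (heads * sectors);
	}
	geom[0] = heads;	/* same geom[] layout as arcmsr_bios_param() */
	geom[1] = sectors;
	geom[2] = cylinders;
}
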
193static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
194{
195 struct pci_dev *pdev = acb->pdev;
196 struct MessageUnit __iomem *reg = acb->pmu;
197 u32 ccb_phyaddr_hi32;
198 void *dma_coherent;
199 dma_addr_t dma_coherent_handle, dma_addr;
200 struct CommandControlBlock *ccb_tmp;
201 int i, j;
202
203 dma_coherent = dma_alloc_coherent(&pdev->dev,
204 ARCMSR_MAX_FREECCB_NUM *
205 sizeof (struct CommandControlBlock) + 0x20,
206 &dma_coherent_handle, GFP_KERNEL);
207 if (!dma_coherent)
208 return -ENOMEM;
209
210 acb->dma_coherent = dma_coherent;
211 acb->dma_coherent_handle = dma_coherent_handle;
212
213 if (((unsigned long)dma_coherent & 0x1F)) {
214 dma_coherent = dma_coherent +
215 (0x20 - ((unsigned long)dma_coherent & 0x1F));
216 dma_coherent_handle = dma_coherent_handle +
217 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
218 }
219
220 dma_addr = dma_coherent_handle;
221 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
222 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
223 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
224 ccb_tmp->acb = acb;
225 acb->pccb_pool[i] = ccb_tmp;
226 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
227 dma_addr = dma_addr + sizeof (struct CommandControlBlock);
228 ccb_tmp++;
229 }
230
231 acb->vir2phy_offset = (unsigned long)ccb_tmp -
232 (unsigned long)dma_addr;
233 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
234 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
235 acb->devstate[i][j] = ARECA_RAID_GOOD;
236
237 /*
238 ** here we need to tell iop 331 our ccb_tmp.HighPart
239 ** if ccb_tmp.HighPart is not zero
240 */
241 ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
242 if (ccb_phyaddr_hi32 != 0) {
243 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
244 writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
245 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
246 if (arcmsr_wait_msgint_ready(acb))
247 printk(KERN_NOTICE "arcmsr%d: "
248 "'set ccb high part physical address' timeout\n",
249 acb->host->host_no);
250 }
251
252 writel(readl(&reg->outbound_intmask) |
253 ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
254 &reg->outbound_intmask);
255 return 0;
256}
257
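[Editor's note: sketch of the alignment arithmetic in arcmsr_alloc_ccb_pool() above. The IOP is handed a CCB as "physical address >> 5" in a single 32-bit register, so each CCB must start on a 32-byte boundary; the extra 0x20 bytes in the allocation pay for rounding the buffer start up. Illustration only.]

static unsigned long align_up_32_sketch(unsigned long addr)
{
	if (addr & 0x1F)			/* not yet 32-byte aligned */
		addr += 0x20 - (addr & 0x1F);	/* round up to the next boundary */
	return addr;				/* now (addr >> 5) << 5 == addr */
}
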
258static int arcmsr_probe(struct pci_dev *pdev,
259 const struct pci_device_id *id)
260{
261 struct Scsi_Host *host;
262 struct AdapterControlBlock *acb;
263 uint8_t bus, dev_fun;
264 int error;
265
266 error = pci_enable_device(pdev);
267 if (error)
268 goto out;
269 pci_set_master(pdev);
270
271 host = scsi_host_alloc(&arcmsr_scsi_host_template,
272 sizeof(struct AdapterControlBlock));
273 if (!host) {
274 error = -ENOMEM;
275 goto out_disable_device;
276 }
277 acb = (struct AdapterControlBlock *)host->hostdata;
278 memset(acb, 0, sizeof (struct AdapterControlBlock));
279
280 error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
281 if (error) {
282 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
283 if (error) {
284 printk(KERN_WARNING
285 "scsi%d: No suitable DMA mask available\n",
286 host->host_no);
287 goto out_host_put;
288 }
289 }
290 bus = pdev->bus->number;
291 dev_fun = pdev->devfn;
292 acb->host = host;
293 acb->pdev = pdev;
294 host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
295 host->max_lun = ARCMSR_MAX_TARGETLUN;
296 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
297 host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
298 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
299 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
300 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
301 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
302 host->unique_id = (bus << 8) | dev_fun;
303 host->irq = pdev->irq;
304 error = pci_request_regions(pdev, "arcmsr");
305 if (error)
306 goto out_host_put;
307
308 acb->pmu = ioremap(pci_resource_start(pdev, 0),
309 pci_resource_len(pdev, 0));
310 if (!acb->pmu) {
311 printk(KERN_NOTICE "arcmsr%d: memory"
312 " mapping region failed\n", acb->host->host_no);
313 goto out_release_regions;
314 }
315 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
316 ACB_F_MESSAGE_RQBUFFER_CLEARED |
317 ACB_F_MESSAGE_WQBUFFER_READED);
318 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
319 INIT_LIST_HEAD(&acb->ccb_free_list);
320
321 error = arcmsr_alloc_ccb_pool(acb);
322 if (error)
323 goto out_iounmap;
324
325 error = request_irq(pdev->irq, arcmsr_do_interrupt,
326 SA_INTERRUPT | SA_SHIRQ, "arcmsr", acb);
327 if (error)
328 goto out_free_ccb_pool;
329
330 arcmsr_iop_init(acb);
331 pci_set_drvdata(pdev, host);
332
333 error = scsi_add_host(host, &pdev->dev);
334 if (error)
335 goto out_free_irq;
336
337 error = arcmsr_alloc_sysfs_attr(acb);
338 if (error)
339 goto out_free_sysfs;
340
341 scsi_scan_host(host);
342 return 0;
343 out_free_sysfs:
344 out_free_irq:
345 free_irq(pdev->irq, acb);
346 out_free_ccb_pool:
347 arcmsr_free_ccb_pool(acb);
348 out_iounmap:
349 iounmap(acb->pmu);
350 out_release_regions:
351 pci_release_regions(pdev);
352 out_host_put:
353 scsi_host_put(host);
354 out_disable_device:
355 pci_disable_device(pdev);
356 out:
357 return error;
358}
359
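[Editor's note: the probe routine above uses the usual kernel "goto ladder" for error unwinding: each acquired resource adds one label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A stripped-down sketch of the shape; acquire_a()/acquire_b()/release_a() are hypothetical placeholders:]

static int acquire_a(void) { return 0; }	/* hypothetical stand-in */
static int acquire_b(void) { return -1; }	/* hypothetical stand-in */
static void release_a(void) { }

static int probe_ladder_sketch(void)
{
	int error;

	error = acquire_a();
	if (error)
		goto out;
	error = acquire_b();
	if (error)
		goto out_release_a;	/* b failed: undo a, then fail */
	return 0;
out_release_a:
	release_a();
out:
	return error;
}
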
360static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
361{
362 struct MessageUnit __iomem *reg = acb->pmu;
363
364 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
365 if (arcmsr_wait_msgint_ready(acb))
366 printk(KERN_NOTICE
367 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
368 , acb->host->host_no);
369}
370
371static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
372{
373 struct AdapterControlBlock *acb = ccb->acb;
374 struct scsi_cmnd *pcmd = ccb->pcmd;
375
376 if (pcmd->use_sg != 0) {
377 struct scatterlist *sl;
378
379 sl = (struct scatterlist *)pcmd->request_buffer;
380 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
381 }
382 else if (pcmd->request_bufflen != 0)
383 pci_unmap_single(acb->pdev,
384 pcmd->SCp.dma_handle,
385 pcmd->request_bufflen, pcmd->sc_data_direction);
386}
387
388static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
389{
390 struct AdapterControlBlock *acb = ccb->acb;
391 struct scsi_cmnd *pcmd = ccb->pcmd;
392
393 arcmsr_pci_unmap_dma(ccb);
394 if (stand_flag == 1)
395 atomic_dec(&acb->ccboutstandingcount);
396 ccb->startdone = ARCMSR_CCB_DONE;
397 ccb->ccb_flags = 0;
398 list_add_tail(&ccb->list, &acb->ccb_free_list);
399 pcmd->scsi_done(pcmd);
400}
401
402static void arcmsr_remove(struct pci_dev *pdev)
403{
404 struct Scsi_Host *host = pci_get_drvdata(pdev);
405 struct AdapterControlBlock *acb =
406 (struct AdapterControlBlock *) host->hostdata;
407 struct MessageUnit __iomem *reg = acb->pmu;
408 int poll_count = 0;
409
410 arcmsr_free_sysfs_attr(acb);
411 scsi_remove_host(host);
412 arcmsr_stop_adapter_bgrb(acb);
413 arcmsr_flush_adapter_cache(acb);
414 writel(readl(&reg->outbound_intmask) |
415 ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
416 &reg->outbound_intmask);
417 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
418 acb->acb_flags &= ~ACB_F_IOP_INITED;
419
420 for (poll_count = 0; poll_count < 256; poll_count++) {
421 if (!atomic_read(&acb->ccboutstandingcount))
422 break;
423 arcmsr_interrupt(acb);
424 msleep(25);
425 }
426
427 if (atomic_read(&acb->ccboutstandingcount)) {
428 int i;
429
430 arcmsr_abort_allcmd(acb);
431 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
432 readl(&reg->outbound_queueport);
433 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
434 struct CommandControlBlock *ccb = acb->pccb_pool[i];
435 if (ccb->startdone == ARCMSR_CCB_START) {
436 ccb->startdone = ARCMSR_CCB_ABORTED;
437 ccb->pcmd->result = DID_ABORT << 16;
438 arcmsr_ccb_complete(ccb, 1);
439 }
440 }
441 }
442
443 free_irq(pdev->irq, acb);
444 iounmap(acb->pmu);
445 arcmsr_free_ccb_pool(acb);
446 pci_release_regions(pdev);
447
448 scsi_host_put(host);
449
450 pci_disable_device(pdev);
451 pci_set_drvdata(pdev, NULL);
452}
453
454static void arcmsr_shutdown(struct pci_dev *pdev)
455{
456 struct Scsi_Host *host = pci_get_drvdata(pdev);
457 struct AdapterControlBlock *acb =
458 (struct AdapterControlBlock *)host->hostdata;
459
460 arcmsr_stop_adapter_bgrb(acb);
461 arcmsr_flush_adapter_cache(acb);
462}
463
464static int arcmsr_module_init(void)
465{
466 int error = 0;
467
468 error = pci_register_driver(&arcmsr_pci_driver);
469 return error;
470}
471
472static void arcmsr_module_exit(void)
473{
474 pci_unregister_driver(&arcmsr_pci_driver);
475}
476module_init(arcmsr_module_init);
477module_exit(arcmsr_module_exit);
478
479static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
480{
481 struct MessageUnit __iomem *reg = acb->pmu;
482 u32 orig_mask = readl(&reg->outbound_intmask);
483
484 writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
485 &reg->outbound_intmask);
486 return orig_mask;
487}
488
489static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
490 u32 orig_mask)
491{
492 struct MessageUnit __iomem *reg = acb->pmu;
493 u32 mask;
494
495 mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
496 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
497 writel(mask, &reg->outbound_intmask);
498}
499
500static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
501{
502 struct MessageUnit __iomem *reg=acb->pmu;
503
504 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
505 if (arcmsr_wait_msgint_ready(acb))
506 printk(KERN_NOTICE
507 "arcmsr%d: wait 'flush adapter cache' timeout \n"
508 , acb->host->host_no);
509}
510
511static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
512{
513 struct scsi_cmnd *pcmd = ccb->pcmd;
514 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
515
516 pcmd->result = DID_OK << 16;
517 if (sensebuffer) {
518 int sense_data_length =
519 sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
520 ? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
521 memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
522 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
523 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
524 sensebuffer->Valid = 1;
525 }
526}
527
528static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
529{
530 struct MessageUnit __iomem *reg = acb->pmu;
531 uint32_t Index;
532 uint8_t Retries = 0x00;
533
534 do {
535 for (Index = 0; Index < 100; Index++) {
536 if (readl(&reg->outbound_intstatus)
537 & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
538 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
539 , &reg->outbound_intstatus);
540 return 0x00;
541 }
542 msleep_interruptible(10);
543 }/*max 1 seconds*/
544 } while (Retries++ < 20);/*max 20 sec*/
545 return 0xff;
546}
547
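[Editor's note: arcmsr_wait_msgint_ready() above polls the message-0 status bit every 10 ms, up to 100 polls per retry and roughly 20 retries, so the handshake gives up after about 20 seconds. A sketch of the bound:]

enum { POLL_MS = 10, POLLS_PER_RETRY = 100, MAX_RETRIES = 20 };

static int msgint_worst_case_ms_sketch(void)
{
	return POLL_MS * POLLS_PER_RETRY * MAX_RETRIES;	/* ~20000 ms */
}
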
548static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
549 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
550{
551 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
552 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
553 uint32_t address_lo, address_hi;
554 int arccdbsize = 0x30;
555
556 ccb->pcmd = pcmd;
557 memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
558 arcmsr_cdb->Bus = 0;
559 arcmsr_cdb->TargetID = pcmd->device->id;
560 arcmsr_cdb->LUN = pcmd->device->lun;
561 arcmsr_cdb->Function = 1;
562 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
563 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
564 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
565 if (pcmd->use_sg) {
566 int length, sgcount, i, cdb_sgcount = 0;
567 struct scatterlist *sl;
568
569 /* Get Scatter Gather List from scsiport. */
570 sl = (struct scatterlist *) pcmd->request_buffer;
571 sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
572 pcmd->sc_data_direction);
573 /* map stor port SG list to our iop SG List. */
574 for (i = 0; i < sgcount; i++) {
575 /* Get the physical address of the current data pointer */
576 length = cpu_to_le32(sg_dma_len(sl));
577 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
578 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
579 if (address_hi == 0) {
580 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
581
582 pdma_sg->address = address_lo;
583 pdma_sg->length = length;
584 psge += sizeof (struct SG32ENTRY);
585 arccdbsize += sizeof (struct SG32ENTRY);
586 } else {
587 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
588
589 pdma_sg->addresshigh = address_hi;
590 pdma_sg->address = address_lo;
591 pdma_sg->length = length|IS_SG64_ADDR;
592 psge += sizeof (struct SG64ENTRY);
593 arccdbsize += sizeof (struct SG64ENTRY);
594 }
595 sl++;
596 cdb_sgcount++;
597 }
598 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
599 arcmsr_cdb->DataLength = pcmd->request_bufflen;
600 if ( arccdbsize > 256)
601 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
602 } else if (pcmd->request_bufflen) {
603 dma_addr_t dma_addr;
604 dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
605 pcmd->request_bufflen, pcmd->sc_data_direction);
606 pcmd->SCp.dma_handle = dma_addr;
607 address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
608 address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
609 if (address_hi == 0) {
610 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
611 pdma_sg->address = address_lo;
612 pdma_sg->length = pcmd->request_bufflen;
613 } else {
614 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
615 pdma_sg->addresshigh = address_hi;
616 pdma_sg->address = address_lo;
617 pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
618 }
619 arcmsr_cdb->sgcount = 1;
620 arcmsr_cdb->DataLength = pcmd->request_bufflen;
621 }
622 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
623 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
624 ccb->ccb_flags |= CCB_FLAG_WRITE;
625 }
626}
627
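[Editor's note: sketch of the scatter-gather descriptor choice in arcmsr_build_ccb() above: whenever the upper 32 bits of a DMA address are zero, the compact SG32ENTRY is emitted; otherwise the SG64ENTRY form, whose length word is tagged with IS_SG64_ADDR. dma_addr_lo32()/dma_addr_hi32() are the arcmsr.h helpers; the 8- and 12-byte sizes are assumptions based on the two-word/three-word layouts used above.]

static int sg_entry_bytes_sketch(unsigned long long dma_addr)
{
	unsigned int address_hi = (unsigned int)(dma_addr >> 32);

	return address_hi ? 12 : 8;	/* SG64ENTRY vs SG32ENTRY */
}
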
628static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
629{
630 struct MessageUnit __iomem *reg = acb->pmu;
631 uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
632 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
633
634 atomic_inc(&acb->ccboutstandingcount);
635 ccb->startdone = ARCMSR_CCB_START;
636 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
637 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
638 &reg->inbound_queueport);
639 else
640 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
641}
642
643void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
644{
645 struct MessageUnit __iomem *reg = acb->pmu;
646 struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
647 uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
648 int32_t allxfer_len = 0;
649
650 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
651 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
652 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
653 && (allxfer_len < 124)) {
654 writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
655 acb->wqbuf_firstindex++;
656 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
657 iop_data++;
658 allxfer_len++;
659 }
660 writel(allxfer_len, &pwbuffer->data_len);
661 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
662 , &reg->inbound_doorbell);
663 }
664}
665
666static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
667{
668 struct MessageUnit __iomem *reg = acb->pmu;
669
670 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
671 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
672 if (arcmsr_wait_msgint_ready(acb))
673 printk(KERN_NOTICE
674 "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
675 , acb->host->host_no);
676}
677
678static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
679{
680 dma_free_coherent(&acb->pdev->dev,
681 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
682 acb->dma_coherent,
683 acb->dma_coherent_handle);
684}
685
686static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
687{
688 struct MessageUnit __iomem *reg = acb->pmu;
689 struct CommandControlBlock *ccb;
690 uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;
691
692 outbound_intstatus = readl(&reg->outbound_intstatus)
693 & acb->outbound_int_enable;
694 writel(outbound_intstatus, &reg->outbound_intstatus);
695 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
696 outbound_doorbell = readl(&reg->outbound_doorbell);
697 writel(outbound_doorbell, &reg->outbound_doorbell);
698 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
699 struct QBUFFER __iomem * prbuffer =
700 (struct QBUFFER __iomem *) &reg->message_rbuffer;
701 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
702 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
703
704 rqbuf_lastindex = acb->rqbuf_lastindex;
705 rqbuf_firstindex = acb->rqbuf_firstindex;
706 iop_len = readl(&prbuffer->data_len);
707 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
708 &(ARCMSR_MAX_QBUFFER - 1);
709 if (my_empty_len >= iop_len) {
710 while (iop_len > 0) {
711 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
712 acb->rqbuf_lastindex++;
713 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
714 iop_data++;
715 iop_len--;
716 }
717 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
718 &reg->inbound_doorbell);
719 } else
720 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
721 }
722 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
723 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
724 if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
725 struct QBUFFER __iomem * pwbuffer =
726 (struct QBUFFER __iomem *) &reg->message_wbuffer;
727 uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data;
728 int32_t allxfer_len = 0;
729
730 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
731 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
732 && (allxfer_len < 124)) {
733 writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
734 acb->wqbuf_firstindex++;
735 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
736 iop_data++;
737 allxfer_len++;
738 }
739 writel(allxfer_len, &pwbuffer->data_len);
740 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
741 &reg->inbound_doorbell);
742 }
743 if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
744 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
745 }
746 }
747 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
748 int id, lun;
749 /*
750 ****************************************************************
751 ** areca cdb command done
752 ****************************************************************
753 */
754 while (1) {
755 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF)
756 break;/*chip FIFO no ccb for completion already*/
757 /* check if command done with no error*/
758 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
759 (flag_ccb << 5));
760 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
761 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
762 struct scsi_cmnd *abortcmd=ccb->pcmd;
763 if (abortcmd) {
764 abortcmd->result |= DID_ABORT << 16;
765 arcmsr_ccb_complete(ccb, 1);
766 printk(KERN_NOTICE
767 "arcmsr%d: ccb='0x%p' isr got aborted command \n"
768 , acb->host->host_no, ccb);
769 }
770 continue;
771 }
772 printk(KERN_NOTICE
773 "arcmsr%d: isr got an illegal ccb command done acb='0x%p'"
774 " ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
775 " ccboutstandingcount=%d \n"
776 , acb->host->host_no
777 , acb
778 , ccb
779 , ccb->acb
780 , ccb->startdone
781 , atomic_read(&acb->ccboutstandingcount));
782 continue;
783 }
784 id = ccb->pcmd->device->id;
785 lun = ccb->pcmd->device->lun;
786 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
787 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
788 acb->devstate[id][lun] = ARECA_RAID_GOOD;
789 ccb->pcmd->result = DID_OK << 16;
790 arcmsr_ccb_complete(ccb, 1);
791 } else {
792 switch(ccb->arcmsr_cdb.DeviceStatus) {
793 case ARCMSR_DEV_SELECT_TIMEOUT: {
794 acb->devstate[id][lun] = ARECA_RAID_GONE;
795 ccb->pcmd->result = DID_TIME_OUT << 16;
796 arcmsr_ccb_complete(ccb, 1);
797 }
798 break;
799 case ARCMSR_DEV_ABORTED:
800 case ARCMSR_DEV_INIT_FAIL: {
801 acb->devstate[id][lun] = ARECA_RAID_GONE;
802 ccb->pcmd->result = DID_BAD_TARGET << 16;
803 arcmsr_ccb_complete(ccb, 1);
804 }
805 break;
806 case ARCMSR_DEV_CHECK_CONDITION: {
807 acb->devstate[id][lun] = ARECA_RAID_GOOD;
808 arcmsr_report_sense_info(ccb);
809 arcmsr_ccb_complete(ccb, 1);
810 }
811 break;
812 default:
813 printk(KERN_NOTICE
814 "arcmsr%d: scsi id=%d lun=%d"
815 " isr got command error done,"
816 " but got unknown DeviceStatus = 0x%x \n"
817 , acb->host->host_no
818 , id
819 , lun
820 , ccb->arcmsr_cdb.DeviceStatus);
821 acb->devstate[id][lun] = ARECA_RAID_GONE;
822 ccb->pcmd->result = DID_NO_CONNECT << 16;
823 arcmsr_ccb_complete(ccb, 1);
824 break;
825 }
826 }
827 }/*drain reply FIFO*/
828 }
829 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
830 return IRQ_NONE;
831 return IRQ_HANDLED;
832}
833
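[Editor's note: sketch of the CCB address recovery in the ISR above. The IOP posts back "physical CDB address >> 5" through the outbound queue port; adding acb->vir2phy_offset, the pool-wide virtual-minus-physical delta recorded in arcmsr_alloc_ccb_pool(), turns it back into a usable kernel pointer. Illustration only.]

static void *flag_ccb_to_virt_sketch(unsigned long vir2phy_offset,
				     unsigned int flag_ccb)
{
	unsigned long phys = (unsigned long)flag_ccb << 5;	/* undo the >> 5 */

	return (void *)(vir2phy_offset + phys);
}
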
834static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
835{
836 if (acb) {
837 /* stop adapter background rebuild */
838 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
839 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
840 arcmsr_stop_adapter_bgrb(acb);
841 arcmsr_flush_adapter_cache(acb);
842 }
843 }
844}
845
846static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd)
847{
848 struct MessageUnit __iomem *reg = acb->pmu;
849 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
850 int retvalue = 0, transfer_len = 0;
851 char *buffer;
852 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
853 (uint32_t ) cmd->cmnd[6] << 16 |
854 (uint32_t ) cmd->cmnd[7] << 8 |
855 (uint32_t ) cmd->cmnd[8];
856 /* 4 bytes: Areca io control code */
857 if (cmd->use_sg) {
858 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
859
860 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
861 if (cmd->use_sg > 1) {
862 retvalue = ARCMSR_MESSAGE_FAIL;
863 goto message_out;
864 }
865 transfer_len += sg->length;
866 } else {
867 buffer = cmd->request_buffer;
868 transfer_len = cmd->request_bufflen;
869 }
870 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
871 retvalue = ARCMSR_MESSAGE_FAIL;
872 goto message_out;
873 }
874 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
875 switch(controlcode) {
876 case ARCMSR_MESSAGE_READ_RQBUFFER: {
877 unsigned long *ver_addr;
878 dma_addr_t buf_handle;
879 uint8_t *pQbuffer, *ptmpQbuffer;
880 int32_t allxfer_len = 0;
881
882 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
883 if (!ver_addr) {
884 retvalue = ARCMSR_MESSAGE_FAIL;
885 goto message_out;
886 }
887 ptmpQbuffer = (uint8_t *) ver_addr;
888 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
889 && (allxfer_len < 1031)) {
890 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
891 memcpy(ptmpQbuffer, pQbuffer, 1);
892 acb->rqbuf_firstindex++;
893 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
894 ptmpQbuffer++;
895 allxfer_len++;
896 }
897 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
898 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
899 &reg->message_rbuffer;
900 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
901 int32_t iop_len;
902
903 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
904 iop_len = readl(&prbuffer->data_len);
905 while (iop_len > 0) {
906 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
907 acb->rqbuf_lastindex++;
908 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
909 iop_data++;
910 iop_len--;
911 }
912 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
913 &reg->inbound_doorbell);
914 }
915 memcpy(pcmdmessagefld->messagedatabuffer,
916 (uint8_t *)ver_addr, allxfer_len);
917 pcmdmessagefld->cmdmessage.Length = allxfer_len;
918 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
919 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
920 }
921 break;
922 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
923 unsigned long *ver_addr;
924 dma_addr_t buf_handle;
925 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
926 uint8_t *pQbuffer, *ptmpuserbuffer;
927
928 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
929 if (!ver_addr) {
930 retvalue = ARCMSR_MESSAGE_FAIL;
931 goto message_out;
932 }
933 ptmpuserbuffer = (uint8_t *)ver_addr;
934 user_len = pcmdmessagefld->cmdmessage.Length;
935 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
936 wqbuf_lastindex = acb->wqbuf_lastindex;
937 wqbuf_firstindex = acb->wqbuf_firstindex;
938 if (wqbuf_lastindex != wqbuf_firstindex) {
939 struct SENSE_DATA *sensebuffer =
940 (struct SENSE_DATA *)cmd->sense_buffer;
941 arcmsr_post_Qbuffer(acb);
942 /* has error report sensedata */
943 sensebuffer->ErrorCode = 0x70;
944 sensebuffer->SenseKey = ILLEGAL_REQUEST;
945 sensebuffer->AdditionalSenseLength = 0x0A;
946 sensebuffer->AdditionalSenseCode = 0x20;
947 sensebuffer->Valid = 1;
948 retvalue = ARCMSR_MESSAGE_FAIL;
949 } else {
950 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
951 &(ARCMSR_MAX_QBUFFER - 1);
952 if (my_empty_len >= user_len) {
953 while (user_len > 0) {
954 pQbuffer =
955 &acb->wqbuffer[acb->wqbuf_lastindex];
956 memcpy(pQbuffer, ptmpuserbuffer, 1);
957 acb->wqbuf_lastindex++;
958 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
959 ptmpuserbuffer++;
960 user_len--;
961 }
962 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
963 acb->acb_flags &=
964 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
965 arcmsr_post_Qbuffer(acb);
966 }
967 } else {
968 /* has error report sensedata */
969 struct SENSE_DATA *sensebuffer =
970 (struct SENSE_DATA *)cmd->sense_buffer;
971 sensebuffer->ErrorCode = 0x70;
972 sensebuffer->SenseKey = ILLEGAL_REQUEST;
973 sensebuffer->AdditionalSenseLength = 0x0A;
974 sensebuffer->AdditionalSenseCode = 0x20;
975 sensebuffer->Valid = 1;
976 retvalue = ARCMSR_MESSAGE_FAIL;
977 }
978 }
979 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
980 }
981 break;
982 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
983 uint8_t *pQbuffer = acb->rqbuffer;
984
985 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
986 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
987 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
988 &reg->inbound_doorbell);
989 }
990 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
991 acb->rqbuf_firstindex = 0;
992 acb->rqbuf_lastindex = 0;
993 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
994 pcmdmessagefld->cmdmessage.ReturnCode =
995 ARCMSR_MESSAGE_RETURNCODE_OK;
996 }
997 break;
998 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
999 uint8_t *pQbuffer = acb->wqbuffer;
1000
1001 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1002 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1003 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1004 , &reg->inbound_doorbell);
1005 }
1006 acb->acb_flags |=
1007 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1008 ACB_F_MESSAGE_WQBUFFER_READED);
1009 acb->wqbuf_firstindex = 0;
1010 acb->wqbuf_lastindex = 0;
1011 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1012 pcmdmessagefld->cmdmessage.ReturnCode =
1013 ARCMSR_MESSAGE_RETURNCODE_OK;
1014 }
1015 break;
1016 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1017 uint8_t *pQbuffer;
1018
1019 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1020 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1021 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1022 , &reg->inbound_doorbell);
1023 }
1024 acb->acb_flags |=
1025 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1026 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1027 | ACB_F_MESSAGE_WQBUFFER_READED);
1028 acb->rqbuf_firstindex = 0;
1029 acb->rqbuf_lastindex = 0;
1030 acb->wqbuf_firstindex = 0;
1031 acb->wqbuf_lastindex = 0;
1032 pQbuffer = acb->rqbuffer;
1033 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1034 pQbuffer = acb->wqbuffer;
1035 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1036 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1037 }
1038 break;
1039 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1040 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1041 }
1042 break;
1043 case ARCMSR_MESSAGE_SAY_HELLO: {
1044 char *hello_string = "Hello! I am ARCMSR";
1045
1046 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1047 , (int16_t)strlen(hello_string));
1048 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1049 }
1050 break;
1051 case ARCMSR_MESSAGE_SAY_GOODBYE:
1052 arcmsr_iop_parking(acb);
1053 break;
1054 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1055 arcmsr_flush_adapter_cache(acb);
1056 break;
1057 default:
1058 retvalue = ARCMSR_MESSAGE_FAIL;
1059 }
1060 message_out:
1061 if (cmd->use_sg) {
1062 struct scatterlist *sg;
1063
1064 sg = (struct scatterlist *) cmd->request_buffer;
1065 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1066 }
1067 return retvalue;
1068}
1069
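[Editor's note: the rq/wq message queues above are classic power-of-two ring buffers: ARCMSR_MAX_QBUFFER slots addressed with a mask, with one slot deliberately kept empty so "full" and "empty" stay distinguishable. The masked subtraction below is the free-space formula used in both the ISR and the ioctl path; RING_SIZE is a stand-in for the real constant. Illustration only.]

#define RING_SIZE 4096	/* stand-in for ARCMSR_MAX_QBUFFER; must be a power of two */

static int ring_free_space_sketch(int first, int last)
{
	return (first - last - 1) & (RING_SIZE - 1);
}
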
1070static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1071{
1072 struct list_head *head = &acb->ccb_free_list;
1073 struct CommandControlBlock *ccb = NULL;
1074
1075 if (!list_empty(head)) {
1076 ccb = list_entry(head->next, struct CommandControlBlock, list);
1077 list_del(head->next);
1078 }
1079 return ccb;
1080}
1081
1082static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1083 struct scsi_cmnd *cmd)
1084{
1085 switch (cmd->cmnd[0]) {
1086 case INQUIRY: {
1087 unsigned char inqdata[36];
1088 char *buffer;
1089
1090 if (cmd->device->lun) {
1091 cmd->result = (DID_TIME_OUT << 16);
1092 cmd->scsi_done(cmd);
1093 return;
1094 }
1095 inqdata[0] = TYPE_PROCESSOR;
1096 /* Periph Qualifier & Periph Dev Type */
1097 inqdata[1] = 0;
1098 /* rem media bit & Dev Type Modifier */
1099 inqdata[2] = 0;
1100 /* ISO,ECMA,& ANSI versions */
1101 inqdata[4] = 31;
1102 /* length of additional data */
1103 strncpy(&inqdata[8], "Areca ", 8);
1104 /* Vendor Identification */
1105 strncpy(&inqdata[16], "RAID controller ", 16);
1106 /* Product Identification */
1107 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1108 if (cmd->use_sg) {
1109 struct scatterlist *sg;
1110
1111 sg = (struct scatterlist *) cmd->request_buffer;
1112 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1113 } else {
1114 buffer = cmd->request_buffer;
1115 }
1116 memcpy(buffer, inqdata, sizeof(inqdata));
1117 if (cmd->use_sg) {
1118 struct scatterlist *sg;
1119
1120 sg = (struct scatterlist *) cmd->request_buffer;
1121 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1122 }
1123 cmd->scsi_done(cmd);
1124 }
1125 break;
1126 case WRITE_BUFFER:
1127 case READ_BUFFER: {
1128 if (arcmsr_iop_message_xfer(acb, cmd))
1129 cmd->result = (DID_ERROR << 16);
1130 cmd->scsi_done(cmd);
1131 }
1132 break;
1133 default:
1134 cmd->scsi_done(cmd);
1135 }
1136}
1137
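[Editor's note: sketch of the 36-byte INQUIRY payload fabricated above for the virtual message-passing device: byte 0 carries the peripheral device type, byte 4 the additional length, and bytes 8-35 the vendor/product/revision strings. Unlike the code above, the sketch zeroes the whole buffer first so no byte is left uninitialized. Illustration only.]

#include <string.h>

static void build_processor_inquiry_sketch(unsigned char inq[36])
{
	memset(inq, 0, 36);
	inq[0] = 0x03;				/* TYPE_PROCESSOR */
	inq[4] = 31;				/* additional length: bytes 5..35 */
	memcpy(&inq[8], "Areca   ", 8);		/* vendor id, space padded */
	memcpy(&inq[16], "RAID controller ", 16);	/* product id */
	memcpy(&inq[32], "R001", 4);		/* product revision */
}
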
1138static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1139 void (* done)(struct scsi_cmnd *))
1140{
1141 struct Scsi_Host *host = cmd->device->host;
1142 struct AdapterControlBlock *acb =
1143 (struct AdapterControlBlock *) host->hostdata;
1144 struct CommandControlBlock *ccb;
1145 int target = cmd->device->id;
1146 int lun = cmd->device->lun;
1147
1148 cmd->scsi_done = done;
1149 cmd->host_scribble = NULL;
1150 cmd->result = 0;
1151 if (acb->acb_flags & ACB_F_BUS_RESET) {
1152 printk(KERN_NOTICE "arcmsr%d: bus reset"
1153 " and return busy \n"
1154 , acb->host->host_no);
1155 return SCSI_MLQUEUE_HOST_BUSY;
1156 }
1157 if(target == 16) {
1158 /* virtual device for iop message transfer */
1159 arcmsr_handle_virtual_command(acb, cmd);
1160 return 0;
1161 }
1162 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1163 uint8_t block_cmd;
1164
1165 block_cmd = cmd->cmnd[0] & 0x0f;
1166 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1167 printk(KERN_NOTICE
1168 "arcmsr%d: block 'read/write'"
1169 " command with gone raid volume,"
1170 " Cmd=%2x, TargetId=%d, Lun=%d \n"
1171 , acb->host->host_no
1172 , cmd->cmnd[0]
1173 , target, lun);
1174 cmd->result = (DID_NO_CONNECT << 16);
1175 cmd->scsi_done(cmd);
1176 return 0;
1177 }
1178 }
1179 if (atomic_read(&acb->ccboutstandingcount) >=
1180 ARCMSR_MAX_OUTSTANDING_CMD)
1181 return SCSI_MLQUEUE_HOST_BUSY;
1182
1183 ccb = arcmsr_get_freeccb(acb);
1184 if (!ccb)
1185 return SCSI_MLQUEUE_HOST_BUSY;
1186 arcmsr_build_ccb(acb, ccb, cmd);
1187 arcmsr_post_ccb(acb, ccb);
1188 return 0;
1189}
1190
1191static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1192{
1193 struct MessageUnit __iomem *reg = acb->pmu;
1194 char *acb_firm_model = acb->firm_model;
1195 char *acb_firm_version = acb->firm_version;
1196 char __iomem *iop_firm_model = (char __iomem *) &reg->message_rwbuffer[15];
1197 char __iomem *iop_firm_version = (char __iomem *) &reg->message_rwbuffer[17];
1198 int count;
1199
1200 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1201 if (arcmsr_wait_msgint_ready(acb))
1202 printk(KERN_NOTICE
1203 "arcmsr%d: wait "
1204 "'get adapter firmware miscellaneous data' timeout \n"
1205 , acb->host->host_no);
1206 count = 8;
1207 while (count) {
1208 *acb_firm_model = readb(iop_firm_model);
1209 acb_firm_model++;
1210 iop_firm_model++;
1211 count--;
1212 }
1213 count = 16;
1214 while (count) {
1215 *acb_firm_version = readb(iop_firm_version);
1216 acb_firm_version++;
1217 iop_firm_version++;
1218 count--;
1219 }
1220 printk(KERN_INFO
1221 "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1222 , acb->host->host_no
1223 , acb->firm_version);
1224 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1225 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1226 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1227 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1228}
1229
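[Editor's note: the firmware model/version strings above live in device registers, so arcmsr_get_firmware_spec() fetches them one byte at a time with readb() rather than memcpy() from the __iomem region. A plain-C sketch of the copy shape, with the volatile dereference standing in for readb():]

static void mmio_strcpy_sketch(char *dst, const volatile unsigned char *src,
			       int count)
{
	while (count--)
		*dst++ = *src++;	/* readb(src) on real hardware */
}
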
1230static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1231 struct CommandControlBlock *poll_ccb)
1232{
1233 struct MessageUnit __iomem *reg = acb->pmu;
1234 struct CommandControlBlock *ccb;
1235 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1236 int id, lun;
1237
1238 polling_ccb_retry:
1239 poll_count++;
1240 outbound_intstatus = readl(&reg->outbound_intstatus)
1241 & acb->outbound_int_enable;
1242 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1243 while (1) {
1244 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1245 if (poll_ccb_done)
1246 break;
1247 else {
1248 msleep(25);
1249 if (poll_count > 100)
1250 break;
1251 goto polling_ccb_retry;
1252 }
1253 }
1254 ccb = (struct CommandControlBlock *)
1255 (acb->vir2phy_offset + (flag_ccb << 5));
1256 if ((ccb->acb != acb) ||
1257 (ccb->startdone != ARCMSR_CCB_START)) {
1258 if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
1259 (ccb == poll_ccb)) {
1260 printk(KERN_NOTICE
1261 "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
1262 " poll command abort successfully \n"
1263 , acb->host->host_no
1264 , ccb->pcmd->device->id
1265 , ccb->pcmd->device->lun
1266 , ccb);
1267 ccb->pcmd->result = DID_ABORT << 16;
1268 arcmsr_ccb_complete(ccb, 1);
1269 poll_ccb_done = 1;
1270 continue;
1271 }
1272 printk(KERN_NOTICE
1273 "arcmsr%d: polling got an illegal ccb"
1274 " command done ccb='0x%p'"
1275 " ccboutstandingcount=%d \n"
1276 , acb->host->host_no
1277 , ccb
1278 , atomic_read(&acb->ccboutstandingcount));
1279 continue;
1280 }
1281 id = ccb->pcmd->device->id;
1282 lun = ccb->pcmd->device->lun;
1283 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
1284 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1285 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1286 ccb->pcmd->result = DID_OK << 16;
1287 arcmsr_ccb_complete(ccb, 1);
1288 } else {
1289 switch(ccb->arcmsr_cdb.DeviceStatus) {
1290 case ARCMSR_DEV_SELECT_TIMEOUT: {
1291 acb->devstate[id][lun] = ARECA_RAID_GONE;
1292 ccb->pcmd->result = DID_TIME_OUT << 16;
1293 arcmsr_ccb_complete(ccb, 1);
1294 }
1295 break;
1296 case ARCMSR_DEV_ABORTED:
1297 case ARCMSR_DEV_INIT_FAIL: {
1298 acb->devstate[id][lun] = ARECA_RAID_GONE;
1299 ccb->pcmd->result = DID_BAD_TARGET << 16;
1300 arcmsr_ccb_complete(ccb, 1);
1301 }
1302 break;
1303 case ARCMSR_DEV_CHECK_CONDITION: {
1304 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1305 arcmsr_report_sense_info(ccb);
1306 arcmsr_ccb_complete(ccb, 1);
1307 }
1308 break;
1309 default:
1310 printk(KERN_NOTICE
1311 "arcmsr%d: scsi id=%d lun=%d"
1312 " polling got command error done,"
1313 " but got unknown DeviceStatus = 0x%x \n"
1314 , acb->host->host_no
1315 , id
1316 , lun
1317 , ccb->arcmsr_cdb.DeviceStatus);
1318 acb->devstate[id][lun] = ARECA_RAID_GONE;
1319 ccb->pcmd->result = DID_BAD_TARGET << 16;
1320 arcmsr_ccb_complete(ccb, 1);
1321 break;
1322 }
1323 }
1324 }
1325}
1326
1327static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1328{
1329 struct MessageUnit __iomem *reg = acb->pmu;
1330 uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0;
1331
1332 do {
1333 firmware_state = readl(&reg->outbound_msgaddr1);
1334 } while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK));
1335 intmask_org = readl(&reg->outbound_intmask)
1336 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
1337 arcmsr_get_firmware_spec(acb);
1338
1339 acb->acb_flags |= ACB_F_MSG_START_BGRB;
1340 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
1341 if (arcmsr_wait_msgint_ready(acb)) {
1342 printk(KERN_NOTICE "arcmsr%d: "
1343 "wait 'start adapter background rebuild' timeout\n",
1344 acb->host->host_no);
1345 }
1346
1347 outbound_doorbell = readl(&reg->outbound_doorbell);
1348 writel(outbound_doorbell, &reg->outbound_doorbell);
1349 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1350 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
1351 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
1352 writel(intmask_org & mask, &reg->outbound_intmask);
1353 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1354 acb->acb_flags |= ACB_F_IOP_INITED;
1355}
1356
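[Editor's note: the mask arithmetic at the end of arcmsr_iop_init() in one place. In this message unit a set bit in outbound_intmask disables the matching interrupt source, so clearing the post-queue and doorbell bits enables them, and the driver caches the enabled set as the complement of the written mask, truncated to the low byte. Illustration only.]

static unsigned int enabled_ints_sketch(unsigned int intmask_written)
{
	return ~intmask_written & 0x000000ff;	/* unmasked bits == enabled sources */
}
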
1357static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1358{
1359 struct MessageUnit __iomem *reg = acb->pmu;
1360 struct CommandControlBlock *ccb;
1361 uint32_t intmask_org;
1362 int i = 0;
1363
1364 if (atomic_read(&acb->ccboutstandingcount) != 0) {
1365 /* ask the iop 331 to abort all outstanding commands */
1366 arcmsr_abort_allcmd(acb);
1367 /* wait 3 sec for all commands to be aborted */
1368 msleep_interruptible(3000);
1369 /* disable all outbound interrupt */
1370 intmask_org = arcmsr_disable_outbound_ints(acb);
1371 /* clear all outbound posted Q */
1372 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
1373 readl(&reg->outbound_queueport);
1374 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1375 ccb = acb->pccb_pool[i];
1376 if ((ccb->startdone == ARCMSR_CCB_START) ||
1377 (ccb->startdone == ARCMSR_CCB_ABORTED)) {
1378 ccb->startdone = ARCMSR_CCB_ABORTED;
1379 ccb->pcmd->result = DID_ABORT << 16;
1380 arcmsr_ccb_complete(ccb, 1);
1381 }
1382 }
1383 /* enable all outbound interrupt */
1384 arcmsr_enable_outbound_ints(acb, intmask_org);
1385 }
1386 atomic_set(&acb->ccboutstandingcount, 0);
1387}
1388
1389static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
1390{
1391 struct AdapterControlBlock *acb =
1392 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1393 int i;
1394
1395 acb->num_resets++;
1396 acb->acb_flags |= ACB_F_BUS_RESET;
1397 for (i = 0; i < 400; i++) {
1398 if (!atomic_read(&acb->ccboutstandingcount))
1399 break;
1400 arcmsr_interrupt(acb);
1401 msleep(25);
1402 }
1403 arcmsr_iop_reset(acb);
1404 acb->acb_flags &= ~ACB_F_BUS_RESET;
1405 return SUCCESS;
1406}
1407
1408static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
1409 struct CommandControlBlock *ccb)
1410{
1411 u32 intmask;
1412
1413 ccb->startdone = ARCMSR_CCB_ABORTED;
1414
1415 /*
1416 ** Wait 3 sec for all outstanding commands to complete.
1417 */
1418 msleep_interruptible(3000);
1419
1420 intmask = arcmsr_disable_outbound_ints(acb);
1421 arcmsr_polling_ccbdone(acb, ccb);
1422 arcmsr_enable_outbound_ints(acb, intmask);
1423}
1424
1425static int arcmsr_abort(struct scsi_cmnd *cmd)
1426{
1427 struct AdapterControlBlock *acb =
1428 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1429 int i = 0;
1430
1431 printk(KERN_NOTICE
1432 "arcmsr%d: abort device command of scsi id=%d lun=%d \n",
1433 acb->host->host_no, cmd->device->id, cmd->device->lun);
1434 acb->num_aborts++;
1435
1436 /*
1437 ************************************************
1438 ** the whole interrupt service routine is locked out here;
1439 ** we need to handle the abort as soon as possible and exit
1440 ************************************************
1441 */
1442 if (!atomic_read(&acb->ccboutstandingcount))
1443 return SUCCESS;
1444
1445 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1446 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1447 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
1448 arcmsr_abort_one_cmd(acb, ccb);
1449 break;
1450 }
1451 }
1452
1453 return SUCCESS;
1454}
1455
1456static const char *arcmsr_info(struct Scsi_Host *host)
1457{
1458 struct AdapterControlBlock *acb =
1459 (struct AdapterControlBlock *) host->hostdata;
1460 static char buf[256];
1461 char *type;
1462 int raid6 = 1;
1463
1464 switch (acb->pdev->device) {
1465 case PCI_DEVICE_ID_ARECA_1110:
1466 case PCI_DEVICE_ID_ARECA_1210:
1467 raid6 = 0;
1468 /*FALLTHRU*/
1469 case PCI_DEVICE_ID_ARECA_1120:
1470 case PCI_DEVICE_ID_ARECA_1130:
1471 case PCI_DEVICE_ID_ARECA_1160:
1472 case PCI_DEVICE_ID_ARECA_1170:
1473 case PCI_DEVICE_ID_ARECA_1220:
1474 case PCI_DEVICE_ID_ARECA_1230:
1475 case PCI_DEVICE_ID_ARECA_1260:
1476 case PCI_DEVICE_ID_ARECA_1270:
1477 case PCI_DEVICE_ID_ARECA_1280:
1478 type = "SATA";
1479 break;
1480 case PCI_DEVICE_ID_ARECA_1380:
1481 case PCI_DEVICE_ID_ARECA_1381:
1482 case PCI_DEVICE_ID_ARECA_1680:
1483 case PCI_DEVICE_ID_ARECA_1681:
1484 type = "SAS";
1485 break;
1486 default:
1487 type = "X-TYPE";
1488 break;
1489 }
1490 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
1491 type, raid6 ? " (RAID6 capable)" : "",
1492 ARCMSR_DRIVER_VERSION);
1493 return buf;
1494}
1495
1496
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index 06d7601cdf56..d006a8cb4a74 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -69,6 +69,7 @@ comment "The following drivers are not fully supported"
 config SCSI_CUMANA_1
 	tristate "CumanaSCSI I support (EXPERIMENTAL)"
 	depends on ARCH_ACORN && EXPERIMENTAL && SCSI
+	select SCSI_SPI_ATTRS
 	help
 	  This enables support for the Cumana SCSI I card. If you have an
 	  Acorn system with one of these, say Y. If unsure, say N.
@@ -76,6 +77,7 @@ config SCSI_CUMANA_1
 config SCSI_ECOSCSI
 	tristate "EcoScsi support (EXPERIMENTAL)"
 	depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI
+	select SCSI_SPI_ATTRS
 	help
 	  This enables support for the EcoSCSI card -- a small card that sits
 	  in the Econet socket. If you have an Acorn system with one of these,
@@ -84,6 +86,7 @@ config SCSI_ECOSCSI
 config SCSI_OAK1
 	tristate "Oak SCSI support (EXPERIMENTAL)"
 	depends on ARCH_ACORN && EXPERIMENTAL && SCSI
+	select SCSI_SPI_ATTRS
 	help
 	  This enables support for the Oak SCSI card. If you have an Acorn
 	  system with one of these, say Y. If unsure, say N.
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index dda5a5f79c53..7621e3fa37b1 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -129,7 +129,6 @@
 #define STRx(x) STRINGIFY(x)
 #define NO_WRITE_STR  STRx(NO_WRITE)
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -3031,7 +3030,7 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 	if (!request_region(host->io_port, 2048, "acornscsi(ram)"))
 		goto err_5;
 
-	ret = request_irq(host->irq, acornscsi_intr, SA_INTERRUPT, "acornscsi", ashost);
+	ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
 	if (ret) {
 		printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
 			host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index e6c9491dc5c0..3bdfc36481ae 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -277,7 +277,7 @@ cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
 	((struct NCR5380_hostdata *)host->hostdata)->ctrl = 0;
 	outb(0x00, host->io_port - 577);
 
-	ret = request_irq(host->irq, cumanascsi_intr, SA_INTERRUPT,
+	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
 			"CumanaSCSI-1", host);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index fad2109268bb..719af0dcc0e5 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -460,7 +460,7 @@ cumanascsi2_probe(struct expansion_card *ec, const struct ecard_id *id)
 		goto out_free;
 
 	ret = request_irq(ec->irq, cumanascsi_2_intr,
-			  SA_INTERRUPT, "cumanascsi2", info);
+			  IRQF_DISABLED, "cumanascsi2", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
 	info->stats.aborts += 1;
 
 	printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
-	__scsi_print_command(SCpnt->data_cmnd);
+	__scsi_print_command(SCpnt->cmnd);
 
 	print_debug_list();
 	fas216_dumpstate(info);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 3d69f6c45a6b..b2c346a47052 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -373,7 +373,7 @@ powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 		goto out_free;
 
 	ret = request_irq(ec->irq, powertecscsi_intr,
-			  SA_INTERRUPT, "powertec", info);
+			  IRQF_DISABLED, "powertec", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 6dd544a5eb56..8c2600ffc6af 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -74,7 +74,7 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt)
74 unsigned long len = 0; 74 unsigned long len = 0;
75 int buf; 75 int buf;
76 76
77 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->buffer; 77 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
78 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; 78 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
79 SCpnt->SCp.ptr = (char *) 79 SCpnt->SCp.ptr = (char *)
80 (page_address(SCpnt->SCp.buffer->page) + 80 (page_address(SCpnt->SCp.buffer->page) +
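The init_SCp() change tracks the removal of the old Scsi_Cmnd buffer field: when use_sg is set, request_buffer now carries the scatterlist array. A minimal sketch of the access pattern being set up, using this era's field names (sg and va are placeholder variables):

struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
char *va = (char *)page_address(sg->page) + sg->offset;	/* first segment */
int segs_left = SCpnt->use_sg - 1;				/* remaining segments */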
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
deleted file mode 100644
index 521b718763f6..000000000000
--- a/drivers/scsi/ata_piix.c
+++ /dev/null
@@ -1,904 +0,0 @@
1/*
2 * ata_piix.c - Intel PATA/SATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 *
9 * Copyright 2003-2005 Red Hat Inc
10 * Copyright 2003-2005 Jeff Garzik
11 *
12 *
13 * Copyright header from piix.c:
14 *
15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
18 *
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; see the file COPYING. If not, write to
32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
33 *
34 *
35 * libata documentation is available via 'make {ps|pdf}docs',
36 * as Documentation/DocBook/libata.*
37 *
38 * Hardware documentation available at http://developer.intel.com/
39 *
40 * Documentation
41 * Publicly available from Intel web site. Errata documentation
42 * is also publicly available. As an aid to anyone hacking on this
43 * driver, the list of errata that are relevant is below, going back to
44 * PIIX4. Older device documentation is now a bit tricky to find.
45 *
46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independent device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This
50 * driver supports only the chips with independent timing (that is those
51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
52 * for the early chip drivers.
53 *
54 * Errata of note:
55 *
56 * Unfixable
57 * PIIX4 errata #9 - Only on ultra obscure hw
58 * ICH3 errata #13 - Not observed to affect real hw
59 * by Intel
60 *
61 * Things we must deal with
62 * PIIX4 errata #10 - BM IDE hang with non UDMA
63 * (must stop/start dma to recover)
64 * 440MX errata #15 - As PIIX4 errata #10
65 * PIIX4 errata #15 - Must not read control registers
66 * during a PIO transfer
67 * 440MX errata #13 - As PIIX4 errata #15
68 * ICH2 errata #21 - DMA mode 0 doesn't work right
69 * ICH0/1 errata #55 - As ICH2 errata #21
70 * ICH2 spec c #9 - Extra operations needed to handle
71 * drive hotswap [NOT YET SUPPORTED]
72 * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
73 * and must be dword aligned
74 * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
75 *
76 * Should have been BIOS fixed:
77 * 450NX: errata #19 - DMA hangs on old 450NX
78 * 450NX: errata #20 - DMA hangs on old 450NX
79 * 450NX: errata #25 - Corruption with DMA on old 450NX
80 * ICH3 errata #15 - IDE deadlock under high load
81 * (BIOS must set dev 31 fn 0 bit 23)
82 * ICH3 errata #18 - Don't use native mode
83 */
84
85#include <linux/kernel.h>
86#include <linux/module.h>
87#include <linux/pci.h>
88#include <linux/init.h>
89#include <linux/blkdev.h>
90#include <linux/delay.h>
91#include <linux/device.h>
92#include <scsi/scsi_host.h>
93#include <linux/libata.h>
94
95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "1.10"
97
98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
100 ICH5_PMR = 0x90, /* port mapping register */
101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */
103
104 PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
109 /* ICH6/7 use different scheme for map value */
110 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
111
112 /* combined mode. if set, PATA is channel 0.
113 * if clear, PATA is channel 1.
114 */
115 PIIX_PORT_ENABLED = (1 << 0),
116 PIIX_PORT_PRESENT = (1 << 4),
117
118 PIIX_80C_PRI = (1 << 5) | (1 << 4),
119 PIIX_80C_SEC = (1 << 7) | (1 << 6),
120
121 /* controller IDs */
122 piix4_pata = 0,
123 ich5_pata = 1,
124 ich5_sata = 2,
125 esb_sata = 3,
126 ich6_sata = 4,
127 ich6_sata_ahci = 5,
128 ich6m_sata_ahci = 6,
129
130 /* constants for mapping table */
131 P0 = 0, /* port 0 */
132 P1 = 1, /* port 1 */
133 P2 = 2, /* port 2 */
134 P3 = 3, /* port 3 */
135 IDE = -1, /* IDE */
136	NA			= -2, /* not available */
137 RV = -3, /* reserved */
138
139 PIIX_AHCI_DEVICE = 6,
140};
141
142struct piix_map_db {
143 const u32 mask;
144 const int map[][4];
145};
146
147static int piix_init_one (struct pci_dev *pdev,
148 const struct pci_device_id *ent);
149static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
150static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
151static void piix_pata_error_handler(struct ata_port *ap);
152static void piix_sata_error_handler(struct ata_port *ap);
153
154static unsigned int in_module_init = 1;
155
156static const struct pci_device_id piix_pci_tbl[] = {
157#ifdef ATA_ENABLE_PATA
158 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
159 { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
160 { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
161 { 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
162#endif
163
164 /* NOTE: The following PCI ids must be kept in sync with the
165 * list in drivers/pci/quirks.c.
166 */
167
168 /* 82801EB (ICH5) */
169 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
170 /* 82801EB (ICH5) */
171 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
172 /* 6300ESB (ICH5 variant with broken PCS present bits) */
173 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
174	/* 6300ESB pretending to be RAID */
175 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
176 /* 82801FB/FW (ICH6/ICH6W) */
177 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
178 /* 82801FR/FRW (ICH6R/ICH6RW) */
179 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
180 /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
181 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
182 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
183 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
184 /* 2801GBM/GHM (ICH7M, identical to ICH6M) */
185 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
186 /* Enterprise Southbridge 2 (where's the datasheet?) */
187 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
188 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
189 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
190 /* SATA Controller 2 IDE (ICH8, ditto) */
191 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
192 /* Mobile SATA Controller IDE (ICH8M, ditto) */
193 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
194
195 { } /* terminate list */
196};
197
198static struct pci_driver piix_pci_driver = {
199 .name = DRV_NAME,
200 .id_table = piix_pci_tbl,
201 .probe = piix_init_one,
202 .remove = ata_pci_remove_one,
203 .suspend = ata_pci_device_suspend,
204 .resume = ata_pci_device_resume,
205};
206
207static struct scsi_host_template piix_sht = {
208 .module = THIS_MODULE,
209 .name = DRV_NAME,
210 .ioctl = ata_scsi_ioctl,
211 .queuecommand = ata_scsi_queuecmd,
212 .can_queue = ATA_DEF_QUEUE,
213 .this_id = ATA_SHT_THIS_ID,
214 .sg_tablesize = LIBATA_MAX_PRD,
215 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
216 .emulated = ATA_SHT_EMULATED,
217 .use_clustering = ATA_SHT_USE_CLUSTERING,
218 .proc_name = DRV_NAME,
219 .dma_boundary = ATA_DMA_BOUNDARY,
220 .slave_configure = ata_scsi_slave_config,
221 .slave_destroy = ata_scsi_slave_destroy,
222 .bios_param = ata_std_bios_param,
223 .resume = ata_scsi_device_resume,
224 .suspend = ata_scsi_device_suspend,
225};
226
227static const struct ata_port_operations piix_pata_ops = {
228 .port_disable = ata_port_disable,
229 .set_piomode = piix_set_piomode,
230 .set_dmamode = piix_set_dmamode,
231 .mode_filter = ata_pci_default_filter,
232
233 .tf_load = ata_tf_load,
234 .tf_read = ata_tf_read,
235 .check_status = ata_check_status,
236 .exec_command = ata_exec_command,
237 .dev_select = ata_std_dev_select,
238
239 .bmdma_setup = ata_bmdma_setup,
240 .bmdma_start = ata_bmdma_start,
241 .bmdma_stop = ata_bmdma_stop,
242 .bmdma_status = ata_bmdma_status,
243 .qc_prep = ata_qc_prep,
244 .qc_issue = ata_qc_issue_prot,
245 .data_xfer = ata_pio_data_xfer,
246
247 .freeze = ata_bmdma_freeze,
248 .thaw = ata_bmdma_thaw,
249 .error_handler = piix_pata_error_handler,
250 .post_internal_cmd = ata_bmdma_post_internal_cmd,
251
252 .irq_handler = ata_interrupt,
253 .irq_clear = ata_bmdma_irq_clear,
254
255 .port_start = ata_port_start,
256 .port_stop = ata_port_stop,
257 .host_stop = ata_host_stop,
258};
259
260static const struct ata_port_operations piix_sata_ops = {
261 .port_disable = ata_port_disable,
262
263 .tf_load = ata_tf_load,
264 .tf_read = ata_tf_read,
265 .check_status = ata_check_status,
266 .exec_command = ata_exec_command,
267 .dev_select = ata_std_dev_select,
268
269 .bmdma_setup = ata_bmdma_setup,
270 .bmdma_start = ata_bmdma_start,
271 .bmdma_stop = ata_bmdma_stop,
272 .bmdma_status = ata_bmdma_status,
273 .qc_prep = ata_qc_prep,
274 .qc_issue = ata_qc_issue_prot,
275 .data_xfer = ata_pio_data_xfer,
276
277 .freeze = ata_bmdma_freeze,
278 .thaw = ata_bmdma_thaw,
279 .error_handler = piix_sata_error_handler,
280 .post_internal_cmd = ata_bmdma_post_internal_cmd,
281
282 .irq_handler = ata_interrupt,
283 .irq_clear = ata_bmdma_irq_clear,
284
285 .port_start = ata_port_start,
286 .port_stop = ata_port_stop,
287 .host_stop = ata_host_stop,
288};
289
290static struct piix_map_db ich5_map_db = {
291 .mask = 0x7,
292 .map = {
293 /* PM PS SM SS MAP */
294 { P0, NA, P1, NA }, /* 000b */
295 { P1, NA, P0, NA }, /* 001b */
296 { RV, RV, RV, RV },
297 { RV, RV, RV, RV },
298 { P0, P1, IDE, IDE }, /* 100b */
299 { P1, P0, IDE, IDE }, /* 101b */
300 { IDE, IDE, P0, P1 }, /* 110b */
301 { IDE, IDE, P1, P0 }, /* 111b */
302 },
303};
304
305static struct piix_map_db ich6_map_db = {
306 .mask = 0x3,
307 .map = {
308 /* PM PS SM SS MAP */
309 { P0, P2, P1, P3 }, /* 00b */
310 { IDE, IDE, P1, P3 }, /* 01b */
311 { P0, P2, IDE, IDE }, /* 10b */
312 { RV, RV, RV, RV },
313 },
314};
315
316static struct piix_map_db ich6m_map_db = {
317 .mask = 0x3,
318 .map = {
319 /* PM PS SM SS MAP */
320 { P0, P2, RV, RV }, /* 00b */
321 { RV, RV, RV, RV },
322 { P0, P2, IDE, IDE }, /* 10b */
323 { RV, RV, RV, RV },
324 },
325};
326
327static struct ata_port_info piix_port_info[] = {
328 /* piix4_pata */
329 {
330 .sht = &piix_sht,
331 .host_flags = ATA_FLAG_SLAVE_POSS,
332 .pio_mask = 0x1f, /* pio0-4 */
333#if 0
334 .mwdma_mask = 0x06, /* mwdma1-2 */
335#else
336 .mwdma_mask = 0x00, /* mwdma broken */
337#endif
338 .udma_mask = ATA_UDMA_MASK_40C,
339 .port_ops = &piix_pata_ops,
340 },
341
342 /* ich5_pata */
343 {
344 .sht = &piix_sht,
345 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
346 .pio_mask = 0x1f, /* pio0-4 */
347#if 0
348 .mwdma_mask = 0x06, /* mwdma1-2 */
349#else
350 .mwdma_mask = 0x00, /* mwdma broken */
351#endif
352 .udma_mask = 0x3f, /* udma0-5 */
353 .port_ops = &piix_pata_ops,
354 },
355
356 /* ich5_sata */
357 {
358 .sht = &piix_sht,
359 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
360 PIIX_FLAG_CHECKINTR,
361 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x7f, /* udma0-6 */
364 .port_ops = &piix_sata_ops,
365 .private_data = &ich5_map_db,
366 },
367
368 /* i6300esb_sata */
369 {
370 .sht = &piix_sht,
371 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
372 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
373 .pio_mask = 0x1f, /* pio0-4 */
374 .mwdma_mask = 0x07, /* mwdma0-2 */
375 .udma_mask = 0x7f, /* udma0-6 */
376 .port_ops = &piix_sata_ops,
377 .private_data = &ich5_map_db,
378 },
379
380 /* ich6_sata */
381 {
382 .sht = &piix_sht,
383 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
384 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
385 .pio_mask = 0x1f, /* pio0-4 */
386 .mwdma_mask = 0x07, /* mwdma0-2 */
387 .udma_mask = 0x7f, /* udma0-6 */
388 .port_ops = &piix_sata_ops,
389 .private_data = &ich6_map_db,
390 },
391
392 /* ich6_sata_ahci */
393 {
394 .sht = &piix_sht,
395 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
396 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
397 PIIX_FLAG_AHCI,
398 .pio_mask = 0x1f, /* pio0-4 */
399 .mwdma_mask = 0x07, /* mwdma0-2 */
400 .udma_mask = 0x7f, /* udma0-6 */
401 .port_ops = &piix_sata_ops,
402 .private_data = &ich6_map_db,
403 },
404
405 /* ich6m_sata_ahci */
406 {
407 .sht = &piix_sht,
408 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
409 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
410 PIIX_FLAG_AHCI,
411 .pio_mask = 0x1f, /* pio0-4 */
412 .mwdma_mask = 0x07, /* mwdma0-2 */
413 .udma_mask = 0x7f, /* udma0-6 */
414 .port_ops = &piix_sata_ops,
415 .private_data = &ich6m_map_db,
416 },
417};
418
419static struct pci_bits piix_enable_bits[] = {
420 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
421 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
422};
423
424MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
425MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
426MODULE_LICENSE("GPL");
427MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
428MODULE_VERSION(DRV_VERSION);
429
430/**
431 * piix_pata_cbl_detect - Probe host controller cable detect info
432 * @ap: Port for which cable detect info is desired
433 *
434 * Read 80c cable indicator from ATA PCI device's PCI config
435 * register. This register is normally set by firmware (BIOS).
436 *
437 * LOCKING:
438 * None (inherited from caller).
439 */
440static void piix_pata_cbl_detect(struct ata_port *ap)
441{
442 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
443 u8 tmp, mask;
444
445 /* no 80c support in host controller? */
446 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
447 goto cbl40;
448
449 /* check BIOS cable detect results */
450 mask = ap->hard_port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
451 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
452 if ((tmp & mask) == 0)
453 goto cbl40;
454
455 ap->cbl = ATA_CBL_PATA80;
456 return;
457
458cbl40:
459 ap->cbl = ATA_CBL_PATA40;
460 ap->udma_mask &= ATA_UDMA_MASK_40C;
461}
462
463/**
464 * piix_pata_prereset - prereset for PATA host controller
465 * @ap: Target port
466 *
467 * Prereset including cable detection.
468 *
469 * LOCKING:
470 * None (inherited from caller).
471 */
472static int piix_pata_prereset(struct ata_port *ap)
473{
474 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
475
476 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
477 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
478 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
479 return 0;
480 }
481
482 piix_pata_cbl_detect(ap);
483
484 return ata_std_prereset(ap);
485}
486
487static void piix_pata_error_handler(struct ata_port *ap)
488{
489 ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
490 ata_std_postreset);
491}
492
493/**
494 * piix_sata_prereset - prereset for SATA host controller
495 * @ap: Target port
496 *
497 * Reads and configures SATA PCI device's PCI config register
498 * Port Configuration and Status (PCS) to determine port and
499 * device availability. Return -ENODEV to skip reset if no
500 * device is present.
501 *
502 * LOCKING:
503 * None (inherited from caller).
504 *
505 * RETURNS:
506 * 0 if device is present, -ENODEV otherwise.
507 */
508static int piix_sata_prereset(struct ata_port *ap)
509{
510 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
511 const unsigned int *map = ap->host_set->private_data;
512 int base = 2 * ap->hard_port_no;
513 unsigned int present_mask = 0;
514 int port, i;
515 u8 pcs;
516
517 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
518 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
519
520 /* enable all ports on this ap and wait for them to settle */
521 for (i = 0; i < 2; i++) {
522 port = map[base + i];
523 if (port >= 0)
524 pcs |= 1 << port;
525 }
526
527 pci_write_config_byte(pdev, ICH5_PCS, pcs);
528 msleep(100);
529
530 /* let's see which devices are present */
531 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
532
533 for (i = 0; i < 2; i++) {
534 port = map[base + i];
535 if (port < 0)
536 continue;
537 if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port))
538 present_mask |= 1 << i;
539 else
540 pcs &= ~(1 << port);
541 }
542
543 /* disable offline ports on non-AHCI controllers */
544 if (!(ap->flags & PIIX_FLAG_AHCI))
545 pci_write_config_byte(pdev, ICH5_PCS, pcs);
546
547 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
548 ap->id, pcs, present_mask);
549
550 if (!present_mask) {
551 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
552 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
553 return 0;
554 }
555
556 return ata_std_prereset(ap);
557}
558
559static void piix_sata_error_handler(struct ata_port *ap)
560{
561 ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL,
562 ata_std_postreset);
563}
564
565/**
566 * piix_set_piomode - Initialize host controller PATA PIO timings
567 * @ap: Port whose timings we are configuring
568 *	@adev: Device to configure
569 *
570 * Set PIO mode for device, in host controller PCI config space.
571 *
572 * LOCKING:
573 * None (inherited from caller).
574 */
575
576static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
577{
578 unsigned int pio = adev->pio_mode - XFER_PIO_0;
579 struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
580 unsigned int is_slave = (adev->devno != 0);
581 unsigned int master_port= ap->hard_port_no ? 0x42 : 0x40;
582 unsigned int slave_port = 0x44;
583 u16 master_data;
584 u8 slave_data;
585
586 static const /* ISP RTC */
587 u8 timings[][2] = { { 0, 0 },
588 { 0, 0 },
589 { 1, 0 },
590 { 2, 1 },
591 { 2, 3 }, };
592
593 pci_read_config_word(dev, master_port, &master_data);
594 if (is_slave) {
595 master_data |= 0x4000;
596 /* enable PPE, IE and TIME */
597 master_data |= 0x0070;
598 pci_read_config_byte(dev, slave_port, &slave_data);
599 slave_data &= (ap->hard_port_no ? 0x0f : 0xf0);
600 slave_data |=
601 (timings[pio][0] << 2) |
602 (timings[pio][1] << (ap->hard_port_no ? 4 : 0));
603 } else {
604 master_data &= 0xccf8;
605 /* enable PPE, IE and TIME */
606 master_data |= 0x0007;
607 master_data |=
608 (timings[pio][0] << 12) |
609 (timings[pio][1] << 8);
610 }
611 pci_write_config_word(dev, master_port, master_data);
612 if (is_slave)
613 pci_write_config_byte(dev, slave_port, slave_data);
614}
615
616/**
617 *	piix_set_dmamode - Initialize host controller PATA DMA timings
618 *	@ap: Port whose timings we are configuring
619 *	@adev: Device to configure
620 *	The DMA mode (MWDMA or UDMA 0-6) is taken from @adev->dma_mode.
621 *
622 * Set UDMA mode for device, in host controller PCI config space.
623 *
624 * LOCKING:
625 * None (inherited from caller).
626 */
627
628static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
629{
630 unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */
631 struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
632 u8 maslave = ap->hard_port_no ? 0x42 : 0x40;
633 u8 speed = udma;
634 unsigned int drive_dn = (ap->hard_port_no ? 2 : 0) + adev->devno;
635 int a_speed = 3 << (drive_dn * 4);
636 int u_flag = 1 << drive_dn;
637 int v_flag = 0x01 << drive_dn;
638 int w_flag = 0x10 << drive_dn;
639 int u_speed = 0;
640 int sitre;
641 u16 reg4042, reg4a;
642 u8 reg48, reg54, reg55;
643
644 pci_read_config_word(dev, maslave, &reg4042);
645 DPRINTK("reg4042 = 0x%04x\n", reg4042);
646 sitre = (reg4042 & 0x4000) ? 1 : 0;
647 pci_read_config_byte(dev, 0x48, &reg48);
648 pci_read_config_word(dev, 0x4a, &reg4a);
649 pci_read_config_byte(dev, 0x54, &reg54);
650 pci_read_config_byte(dev, 0x55, &reg55);
651
652 switch(speed) {
653 case XFER_UDMA_4:
654 case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
655 case XFER_UDMA_6:
656 case XFER_UDMA_5:
657 case XFER_UDMA_3:
658 case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
659 case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
660 case XFER_MW_DMA_2:
661 case XFER_MW_DMA_1: break;
662 default:
663 BUG();
664 return;
665 }
666
667 if (speed >= XFER_UDMA_0) {
668 if (!(reg48 & u_flag))
669 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
670 if (speed == XFER_UDMA_5) {
671 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
672 } else {
673 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
674 }
675 if ((reg4a & a_speed) != u_speed)
676 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
677 if (speed > XFER_UDMA_2) {
678 if (!(reg54 & v_flag))
679 pci_write_config_byte(dev, 0x54, reg54 | v_flag);
680 } else
681 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
682 } else {
683 if (reg48 & u_flag)
684 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
685 if (reg4a & a_speed)
686 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
687 if (reg54 & v_flag)
688 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
689 if (reg55 & w_flag)
690 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
691 }
692}
693
694#define AHCI_PCI_BAR 5
695#define AHCI_GLOBAL_CTL 0x04
696#define AHCI_ENABLE (1 << 31)
697static int piix_disable_ahci(struct pci_dev *pdev)
698{
699 void __iomem *mmio;
700 u32 tmp;
701 int rc = 0;
702
703 /* BUG: pci_enable_device has not yet been called. This
704 * works because this device is usually set up by BIOS.
705 */
706
707 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
708 !pci_resource_len(pdev, AHCI_PCI_BAR))
709 return 0;
710
711 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
712 if (!mmio)
713 return -ENOMEM;
714
715 tmp = readl(mmio + AHCI_GLOBAL_CTL);
716 if (tmp & AHCI_ENABLE) {
717 tmp &= ~AHCI_ENABLE;
718 writel(tmp, mmio + AHCI_GLOBAL_CTL);
719
720 tmp = readl(mmio + AHCI_GLOBAL_CTL);
721 if (tmp & AHCI_ENABLE)
722 rc = -EIO;
723 }
724
725 pci_iounmap(pdev, mmio);
726 return rc;
727}
728
729/**
730 * piix_check_450nx_errata - Check for problem 450NX setup
731 * @ata_dev: the PCI device to check
732 *
733 *	Check for the presence of 450NX errata #19 and errata #25. If
734 *	they are found, return an error code so we can turn off DMA.
735 */
736
737static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
738{
739 struct pci_dev *pdev = NULL;
740 u16 cfg;
741 u8 rev;
742 int no_piix_dma = 0;
743
744 while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
745 {
746		/* Look for 450NX PXB. Check for problem configurations.
747		   A PCI quirk checks bit 6 already. */
748 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
749 pci_read_config_word(pdev, 0x41, &cfg);
750 /* Only on the original revision: IDE DMA can hang */
751 if (rev == 0x00)
752 no_piix_dma = 1;
753 /* On all revisions below 5 PXB bus lock must be disabled for IDE */
754 else if (cfg & (1<<14) && rev < 5)
755 no_piix_dma = 2;
756 }
757 if (no_piix_dma)
758 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
759 if (no_piix_dma == 2)
760 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
761 return no_piix_dma;
762}
763
764static void __devinit piix_init_sata_map(struct pci_dev *pdev,
765 struct ata_port_info *pinfo)
766{
767 struct piix_map_db *map_db = pinfo[0].private_data;
768 const unsigned int *map;
769 int i, invalid_map = 0;
770 u8 map_value;
771
772 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
773
774 map = map_db->map[map_value & map_db->mask];
775
776 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
777 for (i = 0; i < 4; i++) {
778 switch (map[i]) {
779 case RV:
780 invalid_map = 1;
781 printk(" XX");
782 break;
783
784 case NA:
785 printk(" --");
786 break;
787
788 case IDE:
789 WARN_ON((i & 1) || map[i + 1] != IDE);
790 pinfo[i / 2] = piix_port_info[ich5_pata];
791 i++;
792 printk(" IDE IDE");
793 break;
794
795 default:
796 printk(" P%d", map[i]);
797 if (i & 1)
798 pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
799 break;
800 }
801 }
802 printk(" ]\n");
803
804 if (invalid_map)
805 dev_printk(KERN_ERR, &pdev->dev,
806 "invalid MAP value %u\n", map_value);
807
808 pinfo[0].private_data = (void *)map;
809 pinfo[1].private_data = (void *)map;
810}
811
812/**
813 * piix_init_one - Register PIIX ATA PCI device with kernel services
814 * @pdev: PCI device to register
815 * @ent: Entry in piix_pci_tbl matching with @pdev
816 *
817 * Called from kernel PCI layer. We probe for combined mode (sigh),
818 * and then hand over control to libata, for it to do the rest.
819 *
820 * LOCKING:
821 * Inherited from PCI layer (may sleep).
822 *
823 * RETURNS:
824 * Zero on success, or -ERRNO value.
825 */
826
827static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
828{
829 static int printed_version;
830 struct ata_port_info port_info[2];
831 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
832 unsigned long host_flags;
833
834 if (!printed_version++)
835 dev_printk(KERN_DEBUG, &pdev->dev,
836 "version " DRV_VERSION "\n");
837
838 /* no hotplugging support (FIXME) */
839 if (!in_module_init)
840 return -ENODEV;
841
842 port_info[0] = piix_port_info[ent->driver_data];
843 port_info[1] = piix_port_info[ent->driver_data];
844
845 host_flags = port_info[0].host_flags;
846
847 if (host_flags & PIIX_FLAG_AHCI) {
848 u8 tmp;
849 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
850 if (tmp == PIIX_AHCI_DEVICE) {
851 int rc = piix_disable_ahci(pdev);
852 if (rc)
853 return rc;
854 }
855 }
856
857 /* Initialize SATA map */
858 if (host_flags & ATA_FLAG_SATA)
859 piix_init_sata_map(pdev, port_info);
860
861 /* On ICH5, some BIOSen disable the interrupt using the
862 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
863 * On ICH6, this bit has the same effect, but only when
864 * MSI is disabled (and it is disabled, as we don't use
865 * message-signalled interrupts currently).
866 */
867 if (host_flags & PIIX_FLAG_CHECKINTR)
868 pci_intx(pdev, 1);
869
870 if (piix_check_450nx_errata(pdev)) {
871 /* This writes into the master table but it does not
872 really matter for this errata as we will apply it to
873 all the PIIX devices on the board */
874 port_info[0].mwdma_mask = 0;
875 port_info[0].udma_mask = 0;
876 port_info[1].mwdma_mask = 0;
877 port_info[1].udma_mask = 0;
878 }
879 return ata_pci_init_one(pdev, ppinfo, 2);
880}
881
882static int __init piix_init(void)
883{
884 int rc;
885
886 DPRINTK("pci_module_init\n");
887 rc = pci_module_init(&piix_pci_driver);
888 if (rc)
889 return rc;
890
891 in_module_init = 0;
892
893 DPRINTK("done\n");
894 return 0;
895}
896
897static void __exit piix_exit(void)
898{
899 pci_unregister_driver(&piix_pci_driver);
900}
901
902module_init(piix_init);
903module_exit(piix_exit);
904
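ata_piix.c is deleted here in its entirety; this is consistent with the libata low-level drivers being relocated out of drivers/scsi (to drivers/ata) rather than dropped. Note also that the init path above still uses pci_module_init(); in later trees that wrapper is gone and the same registration is spelled directly. A sketch, assuming no additional module setup is needed:

static int __init piix_init(void)
{
	/* pci_module_init() was a thin wrapper around this call */
	return pci_register_driver(&piix_pci_driver);
}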
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 57295bcea3e7..e397129c90d1 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
507 */ 507 */
508 508
509 if (cmd->use_sg) { 509 if (cmd->use_sg) {
510 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 510 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
511 cmd->SCp.buffers_residual = cmd->use_sg - 1; 511 cmd->SCp.buffers_residual = cmd->use_sg - 1;
512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ 512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
513 cmd->SCp.buffer->offset; 513 cmd->SCp.buffer->offset;
@@ -524,7 +524,6 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
524 } 524 }
525} 525}
526 526
527#include <linux/config.h>
528#include <linux/delay.h> 527#include <linux/delay.h>
529 528
530#if NDEBUG 529#if NDEBUG
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index f677c5a32a68..e1be4a4387cd 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -65,7 +65,6 @@
65 65
66 66
67 67
68#include <linux/config.h>
69#include <linux/module.h> 68#include <linux/module.h>
70 69
71#define NDEBUG (0) 70#define NDEBUG (0)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 3ee4d4d3f445..0ec41f34f462 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -2625,29 +2625,32 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2625 unsigned int base_io, tmport, error,n; 2625 unsigned int base_io, tmport, error,n;
2626 unsigned char host_id; 2626 unsigned char host_id;
2627 struct Scsi_Host *shpnt = NULL; 2627 struct Scsi_Host *shpnt = NULL;
2628 struct atp_unit atp_dev, *p; 2628 struct atp_unit *atpdev, *p;
2629 unsigned char setupdata[2][16]; 2629 unsigned char setupdata[2][16];
2630 int count = 0; 2630 int count = 0;
2631 2631
2632 atpdev = kzalloc(sizeof(*atpdev), GFP_KERNEL);
2633 if (!atpdev)
2634 return -ENOMEM;
2635
2632 if (pci_enable_device(pdev)) 2636 if (pci_enable_device(pdev))
2633 return -EIO; 2637 goto err_eio;
2634 2638
2635 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 2639 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
2636 printk(KERN_INFO "atp870u: use 32bit DMA mask.\n"); 2640 printk(KERN_INFO "atp870u: use 32bit DMA mask.\n");
2637 } else { 2641 } else {
2638 printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); 2642 printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
2639 return -EIO; 2643 goto err_eio;
2640 } 2644 }
2641 2645
2642 memset(&atp_dev, 0, sizeof atp_dev);
2643 /* 2646 /*
2644 * It's probably easier to weed out some revisions like 2647 * It's probably easier to weed out some revisions like
2645 * this than via the PCI device table 2648 * this than via the PCI device table
2646 */ 2649 */
2647 if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { 2650 if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
2648 error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atp_dev.chip_ver); 2651 error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
2649 if (atp_dev.chip_ver < 2) 2652 if (atpdev->chip_ver < 2)
2650 return -EIO; 2653 goto err_eio;
2651 } 2654 }
2652 2655
2653 switch (ent->device) { 2656 switch (ent->device) {
@@ -2656,15 +2659,15 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2656 case ATP880_DEVID1: 2659 case ATP880_DEVID1:
2657 case ATP880_DEVID2: 2660 case ATP880_DEVID2:
2658 case ATP885_DEVID: 2661 case ATP885_DEVID:
2659 atp_dev.chip_ver = 0x04; 2662 atpdev->chip_ver = 0x04;
2660 default: 2663 default:
2661 break; 2664 break;
2662 } 2665 }
2663 base_io = pci_resource_start(pdev, 0); 2666 base_io = pci_resource_start(pdev, 0);
2664 base_io &= 0xfffffff8; 2667 base_io &= 0xfffffff8;
2665 2668
2666 if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { 2669 if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
2667 error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atp_dev.chip_ver); 2670 error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
2668 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 2671 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
2669 2672
2670 host_id = inb(base_io + 0x39); 2673 host_id = inb(base_io + 0x39);
@@ -2672,17 +2675,17 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2672 2675
2673 printk(KERN_INFO " ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: %d" 2676 printk(KERN_INFO " ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: %d"
2674 " IO:%x, IRQ:%d.\n", count, base_io, pdev->irq); 2677 " IO:%x, IRQ:%d.\n", count, base_io, pdev->irq);
2675 atp_dev.ioport[0] = base_io + 0x40; 2678 atpdev->ioport[0] = base_io + 0x40;
2676 atp_dev.pciport[0] = base_io + 0x28; 2679 atpdev->pciport[0] = base_io + 0x28;
2677 atp_dev.dev_id = ent->device; 2680 atpdev->dev_id = ent->device;
2678 atp_dev.host_id[0] = host_id; 2681 atpdev->host_id[0] = host_id;
2679 2682
2680 tmport = base_io + 0x22; 2683 tmport = base_io + 0x22;
2681 atp_dev.scam_on = inb(tmport); 2684 atpdev->scam_on = inb(tmport);
2682 tmport += 0x13; 2685 tmport += 0x13;
2683 atp_dev.global_map[0] = inb(tmport); 2686 atpdev->global_map[0] = inb(tmport);
2684 tmport += 0x07; 2687 tmport += 0x07;
2685 atp_dev.ultra_map[0] = inw(tmport); 2688 atpdev->ultra_map[0] = inw(tmport);
2686 2689
2687 n = 0x3f09; 2690 n = 0x3f09;
2688next_fblk_880: 2691next_fblk_880:
@@ -2695,63 +2698,63 @@ next_fblk_880:
2695 if (inb(base_io + 0x30) == 0xff) 2698 if (inb(base_io + 0x30) == 0xff)
2696 goto flash_ok_880; 2699 goto flash_ok_880;
2697 2700
2698 atp_dev.sp[0][m++] = inb(base_io + 0x30); 2701 atpdev->sp[0][m++] = inb(base_io + 0x30);
2699 atp_dev.sp[0][m++] = inb(base_io + 0x31); 2702 atpdev->sp[0][m++] = inb(base_io + 0x31);
2700 atp_dev.sp[0][m++] = inb(base_io + 0x32); 2703 atpdev->sp[0][m++] = inb(base_io + 0x32);
2701 atp_dev.sp[0][m++] = inb(base_io + 0x33); 2704 atpdev->sp[0][m++] = inb(base_io + 0x33);
2702 outw(n, base_io + 0x34); 2705 outw(n, base_io + 0x34);
2703 n += 0x0002; 2706 n += 0x0002;
2704 atp_dev.sp[0][m++] = inb(base_io + 0x30); 2707 atpdev->sp[0][m++] = inb(base_io + 0x30);
2705 atp_dev.sp[0][m++] = inb(base_io + 0x31); 2708 atpdev->sp[0][m++] = inb(base_io + 0x31);
2706 atp_dev.sp[0][m++] = inb(base_io + 0x32); 2709 atpdev->sp[0][m++] = inb(base_io + 0x32);
2707 atp_dev.sp[0][m++] = inb(base_io + 0x33); 2710 atpdev->sp[0][m++] = inb(base_io + 0x33);
2708 outw(n, base_io + 0x34); 2711 outw(n, base_io + 0x34);
2709 n += 0x0002; 2712 n += 0x0002;
2710 atp_dev.sp[0][m++] = inb(base_io + 0x30); 2713 atpdev->sp[0][m++] = inb(base_io + 0x30);
2711 atp_dev.sp[0][m++] = inb(base_io + 0x31); 2714 atpdev->sp[0][m++] = inb(base_io + 0x31);
2712 atp_dev.sp[0][m++] = inb(base_io + 0x32); 2715 atpdev->sp[0][m++] = inb(base_io + 0x32);
2713 atp_dev.sp[0][m++] = inb(base_io + 0x33); 2716 atpdev->sp[0][m++] = inb(base_io + 0x33);
2714 outw(n, base_io + 0x34); 2717 outw(n, base_io + 0x34);
2715 n += 0x0002; 2718 n += 0x0002;
2716 atp_dev.sp[0][m++] = inb(base_io + 0x30); 2719 atpdev->sp[0][m++] = inb(base_io + 0x30);
2717 atp_dev.sp[0][m++] = inb(base_io + 0x31); 2720 atpdev->sp[0][m++] = inb(base_io + 0x31);
2718 atp_dev.sp[0][m++] = inb(base_io + 0x32); 2721 atpdev->sp[0][m++] = inb(base_io + 0x32);
2719 atp_dev.sp[0][m++] = inb(base_io + 0x33); 2722 atpdev->sp[0][m++] = inb(base_io + 0x33);
2720 n += 0x0018; 2723 n += 0x0018;
2721 goto next_fblk_880; 2724 goto next_fblk_880;
2722flash_ok_880: 2725flash_ok_880:
2723 outw(0, base_io + 0x34); 2726 outw(0, base_io + 0x34);
2724 atp_dev.ultra_map[0] = 0; 2727 atpdev->ultra_map[0] = 0;
2725 atp_dev.async[0] = 0; 2728 atpdev->async[0] = 0;
2726 for (k = 0; k < 16; k++) { 2729 for (k = 0; k < 16; k++) {
2727 n = 1; 2730 n = 1;
2728 n = n << k; 2731 n = n << k;
2729 if (atp_dev.sp[0][k] > 1) { 2732 if (atpdev->sp[0][k] > 1) {
2730 atp_dev.ultra_map[0] |= n; 2733 atpdev->ultra_map[0] |= n;
2731 } else { 2734 } else {
2732 if (atp_dev.sp[0][k] == 0) 2735 if (atpdev->sp[0][k] == 0)
2733 atp_dev.async[0] |= n; 2736 atpdev->async[0] |= n;
2734 } 2737 }
2735 } 2738 }
2736 atp_dev.async[0] = ~(atp_dev.async[0]); 2739 atpdev->async[0] = ~(atpdev->async[0]);
2737 outb(atp_dev.global_map[0], base_io + 0x35); 2740 outb(atpdev->global_map[0], base_io + 0x35);
2738 2741
2739 shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); 2742 shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
2740 if (!shpnt) 2743 if (!shpnt)
2741 return -ENOMEM; 2744 goto err_nomem;
2742 2745
2743 p = (struct atp_unit *)&shpnt->hostdata; 2746 p = (struct atp_unit *)&shpnt->hostdata;
2744 2747
2745 atp_dev.host = shpnt; 2748 atpdev->host = shpnt;
2746 atp_dev.pdev = pdev; 2749 atpdev->pdev = pdev;
2747 pci_set_drvdata(pdev, p); 2750 pci_set_drvdata(pdev, p);
2748 memcpy(p, &atp_dev, sizeof atp_dev); 2751 memcpy(p, atpdev, sizeof(*atpdev));
2749 if (atp870u_init_tables(shpnt) < 0) { 2752 if (atp870u_init_tables(shpnt) < 0) {
2750 printk(KERN_ERR "Unable to allocate tables for Acard controller\n"); 2753 printk(KERN_ERR "Unable to allocate tables for Acard controller\n");
2751 goto unregister; 2754 goto unregister;
2752 } 2755 }
2753 2756
2754 if (request_irq(pdev->irq, atp870u_intr_handle, SA_SHIRQ, "atp880i", shpnt)) { 2757 if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp880i", shpnt)) {
2755 printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq); 2758 printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq);
2756 goto free_tables; 2759 goto free_tables;
2757 } 2760 }
@@ -2798,31 +2801,31 @@ flash_ok_880:
2798 printk(KERN_INFO " ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%x, IRQ:%d.\n" 2801 printk(KERN_INFO " ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%x, IRQ:%d.\n"
2799 , base_io, pdev->irq); 2802 , base_io, pdev->irq);
2800 2803
2801 atp_dev.pdev = pdev; 2804 atpdev->pdev = pdev;
2802 atp_dev.dev_id = ent->device; 2805 atpdev->dev_id = ent->device;
2803 atp_dev.baseport = base_io; 2806 atpdev->baseport = base_io;
2804 atp_dev.ioport[0] = base_io + 0x80; 2807 atpdev->ioport[0] = base_io + 0x80;
2805 atp_dev.ioport[1] = base_io + 0xc0; 2808 atpdev->ioport[1] = base_io + 0xc0;
2806 atp_dev.pciport[0] = base_io + 0x40; 2809 atpdev->pciport[0] = base_io + 0x40;
2807 atp_dev.pciport[1] = base_io + 0x50; 2810 atpdev->pciport[1] = base_io + 0x50;
2808 2811
2809 shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); 2812 shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
2810 if (!shpnt) 2813 if (!shpnt)
2811 return -ENOMEM; 2814 goto err_nomem;
2812 2815
2813 p = (struct atp_unit *)&shpnt->hostdata; 2816 p = (struct atp_unit *)&shpnt->hostdata;
2814 2817
2815 atp_dev.host = shpnt; 2818 atpdev->host = shpnt;
2816 atp_dev.pdev = pdev; 2819 atpdev->pdev = pdev;
2817 pci_set_drvdata(pdev, p); 2820 pci_set_drvdata(pdev, p);
2818 memcpy(p, &atp_dev, sizeof(struct atp_unit)); 2821 memcpy(p, atpdev, sizeof(struct atp_unit));
2819 if (atp870u_init_tables(shpnt) < 0) 2822 if (atp870u_init_tables(shpnt) < 0)
2820 goto unregister; 2823 goto unregister;
2821 2824
2822#ifdef ED_DBGP 2825#ifdef ED_DBGP
2823 printk("request_irq() shpnt %p hostdata %p\n", shpnt, p); 2826 printk("request_irq() shpnt %p hostdata %p\n", shpnt, p);
2824#endif 2827#endif
2825 if (request_irq(pdev->irq, atp870u_intr_handle, SA_SHIRQ, "atp870u", shpnt)) { 2828 if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt)) {
2826 printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n"); 2829 printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n");
2827 goto free_tables; 2830 goto free_tables;
2828 } 2831 }
@@ -2974,43 +2977,43 @@ flash_ok_885:
2974 printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: %d " 2977 printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: %d "
2975 "IO:%x, IRQ:%d.\n", count, base_io, pdev->irq); 2978 "IO:%x, IRQ:%d.\n", count, base_io, pdev->irq);
2976 2979
2977 atp_dev.ioport[0] = base_io; 2980 atpdev->ioport[0] = base_io;
2978 atp_dev.pciport[0] = base_io + 0x20; 2981 atpdev->pciport[0] = base_io + 0x20;
2979 atp_dev.dev_id = ent->device; 2982 atpdev->dev_id = ent->device;
2980 host_id &= 0x07; 2983 host_id &= 0x07;
2981 atp_dev.host_id[0] = host_id; 2984 atpdev->host_id[0] = host_id;
2982 tmport = base_io + 0x22; 2985 tmport = base_io + 0x22;
2983 atp_dev.scam_on = inb(tmport); 2986 atpdev->scam_on = inb(tmport);
2984 tmport += 0x0b; 2987 tmport += 0x0b;
2985 atp_dev.global_map[0] = inb(tmport++); 2988 atpdev->global_map[0] = inb(tmport++);
2986 atp_dev.ultra_map[0] = inw(tmport); 2989 atpdev->ultra_map[0] = inw(tmport);
2987 2990
2988 if (atp_dev.ultra_map[0] == 0) { 2991 if (atpdev->ultra_map[0] == 0) {
2989 atp_dev.scam_on = 0x00; 2992 atpdev->scam_on = 0x00;
2990 atp_dev.global_map[0] = 0x20; 2993 atpdev->global_map[0] = 0x20;
2991 atp_dev.ultra_map[0] = 0xffff; 2994 atpdev->ultra_map[0] = 0xffff;
2992 } 2995 }
2993 2996
2994 shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); 2997 shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
2995 if (!shpnt) 2998 if (!shpnt)
2996 return -ENOMEM; 2999 goto err_nomem;
2997 3000
2998 p = (struct atp_unit *)&shpnt->hostdata; 3001 p = (struct atp_unit *)&shpnt->hostdata;
2999 3002
3000 atp_dev.host = shpnt; 3003 atpdev->host = shpnt;
3001 atp_dev.pdev = pdev; 3004 atpdev->pdev = pdev;
3002 pci_set_drvdata(pdev, p); 3005 pci_set_drvdata(pdev, p);
3003 memcpy(p, &atp_dev, sizeof atp_dev); 3006 memcpy(p, atpdev, sizeof(*atpdev));
3004 if (atp870u_init_tables(shpnt) < 0) 3007 if (atp870u_init_tables(shpnt) < 0)
3005 goto unregister; 3008 goto unregister;
3006 3009
3007 if (request_irq(pdev->irq, atp870u_intr_handle, SA_SHIRQ, "atp870i", shpnt)) { 3010 if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870i", shpnt)) {
3008 printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq); 3011 printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq);
3009 goto free_tables; 3012 goto free_tables;
3010 } 3013 }
3011 3014
3012 spin_lock_irqsave(shpnt->host_lock, flags); 3015 spin_lock_irqsave(shpnt->host_lock, flags);
3013 if (atp_dev.chip_ver > 0x07) { /* check if atp876 chip then enable terminator */ 3016 if (atpdev->chip_ver > 0x07) { /* check if atp876 chip then enable terminator */
3014 tmport = base_io + 0x3e; 3017 tmport = base_io + 0x3e;
3015 outb(0x00, tmport); 3018 outb(0x00, tmport);
3016 } 3019 }
@@ -3044,7 +3047,7 @@ flash_ok_885:
3044 outb((inb(tmport) & 0xef), tmport); 3047 outb((inb(tmport) & 0xef), tmport);
3045 tmport++; 3048 tmport++;
3046 outb((inb(tmport) | 0x20), tmport); 3049 outb((inb(tmport) | 0x20), tmport);
3047 if (atp_dev.chip_ver == 4) 3050 if (atpdev->chip_ver == 4)
3048 shpnt->max_id = 16; 3051 shpnt->max_id = 16;
3049 else 3052 else
3050 shpnt->max_id = 8; 3053 shpnt->max_id = 8;
@@ -3093,6 +3096,12 @@ unregister:
3093	printk("atp870u_probe: unregister\n");	3096	printk("atp870u_probe: unregister\n");
3094 scsi_host_put(shpnt); 3097 scsi_host_put(shpnt);
3095 return -1; 3098 return -1;
3099err_eio:
3100 kfree(atpdev);
3101 return -EIO;
3102err_nomem:
3103 kfree(atpdev);
3104 return -ENOMEM;
3096} 3105}
3097 3106
3098/* The abort command does not leave the device in a clean state where 3107/* The abort command does not leave the device in a clean state where
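The atp870u hunks above convert a large on-stack struct atp_unit into a kzalloc()ed allocation and route every early failure through labels that free it, which is why the bare return statements become gotos. A generic sketch of the pattern with hypothetical names (my_unit, my_probe):

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct my_unit *u = kzalloc(sizeof(*u), GFP_KERNEL);

	if (!u)
		return -ENOMEM;
	if (pci_enable_device(pdev))
		goto err_eio;
	/* ... device setup using u instead of a stack copy ... */
	return 0;
err_eio:
	kfree(u);
	return -EIO;
}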
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
index 3867ac2de4c2..329a8f297b31 100644
--- a/drivers/scsi/blz1230.c
+++ b/drivers/scsi/blz1230.c
@@ -172,7 +172,7 @@ int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
172 172
173 esp->irq = IRQ_AMIGA_PORTS; 173 esp->irq = IRQ_AMIGA_PORTS;
174 esp->slot = board+REAL_BLZ1230_ESP_ADDR; 174 esp->slot = board+REAL_BLZ1230_ESP_ADDR;
175 if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ, 175 if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
176 "Blizzard 1230 SCSI IV", esp->ehost)) 176 "Blizzard 1230 SCSI IV", esp->ehost))
177 goto err_out; 177 goto err_out;
178 178
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
index 4ebe69e32756..b6c137b97350 100644
--- a/drivers/scsi/blz2060.c
+++ b/drivers/scsi/blz2060.c
@@ -146,7 +146,7 @@ int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
146 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 146 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
147 147
148 esp->irq = IRQ_AMIGA_PORTS; 148 esp->irq = IRQ_AMIGA_PORTS;
149 request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ, 149 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
150 "Blizzard 2060 SCSI", esp->ehost); 150 "Blizzard 2060 SCSI", esp->ehost);
151 151
152 /* Figure out our scsi ID on the bus */ 152 /* Figure out our scsi ID on the bus */
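Unlike the blz1230 hunk above, this request_irq() return value is not checked, so a failed registration would go unnoticed. A more defensive sketch of the same call (err_out is a placeholder unwind label, not taken from this driver's shown code):

if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
		"Blizzard 2060 SCSI", esp->ehost))
	goto err_out;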
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index d9abd1645d15..f6caa4307768 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -7,7 +7,6 @@
7 7
8#define VERSION "0.25" 8#define VERSION "0.25"
9 9
10#include <linux/config.h>
11#include <linux/module.h> 10#include <linux/module.h>
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/fs.h> 12#include <linux/fs.h>
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d92d5040a9fe..61f6024b61ba 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -5,9 +5,9 @@
5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) 5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
6 * by D. Gilbert and aeb (20020609) 6 * by D. Gilbert and aeb (20020609)
7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
8 * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
8 */ 9 */
9 10
10#include <linux/config.h>
11#include <linux/blkdev.h> 11#include <linux/blkdev.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -37,55 +37,56 @@ static const char * cdb_byte0_names[] = {
37/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", 37/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
38/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, 38/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
39 "Reasssign Blocks", 39 "Reasssign Blocks",
40/* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, 40/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
41/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", 41/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
42/* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", 42/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
43 "Reserve (6)", 43 "Reserve(6)",
44/* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", 44/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
45/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", 45/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
46/* 1e-1f */ "Prevent/Allow Medium Removal", NULL, 46/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
47/* 20-22 */ NULL, NULL, NULL, 47/* 20-22 */ NULL, NULL, NULL,
48/* 23-28 */ "Read Format Capacities", "Set Window", 48/* 23-28 */ "Read Format Capacities", "Set Window",
49 "Read Capacity (10)", NULL, NULL, "Read (10)", 49 "Read Capacity(10)", NULL, NULL, "Read(10)",
50/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", 50/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
51 "Read updated block", 51 "Read updated block",
52/* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", 52/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
53/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", 53/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
54/* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", 54/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
55 "Read Defect Data(10)", 55 "Read Defect Data(10)",
56/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", 56/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
57 "Read Buffer", 57 "Read Buffer",
58/* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", 58/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
59/* 40-41 */ "Change Definition", "Write Same (10)", 59/* 40-41 */ "Change Definition", "Write Same(10)",
60/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", 60/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
61 "Play audio (10)", "Get configuration", "Play audio msf", 61 "Play audio(10)", "Get configuration", "Play audio msf",
62 "Play audio track/index", 62 "Play audio track/index",
63/* 49-4f */ "Play track relative (10)", "Get event status notification", 63/* 49-4f */ "Play track relative(10)", "Get event status notification",
64 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", 64 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
65 NULL, 65 NULL,
66/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", 66/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
67 "Reserve track", "Send OPC info", "Mode Select (10)", 67 "Reserve track", "Send OPC info", "Mode Select(10)",
68/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", 68/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
69 "Mode Sense (10)", "Close track/session", 69 "Mode Sense(10)", "Close track/session",
70/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", 70/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
71 "Persistent reserve out", 71 "Persistent reserve out",
72/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
73/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 73/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
74/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 74/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
75/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", 75/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
76/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", 76/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
77 "Receive copy results", 77 "Receive copy results",
78/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", 78/* 85-89 */ "ATA command pass through(16)", "Access control in",
79 "Read (16)", "Memory Export Out (16)", 79 "Access control out", "Read(16)", "Memory Export Out(16)",
80/* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes", 80/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
81 "Write and verify (16)", "Verify (16)", 81 "Write and verify(16)", "Verify(16)",
82/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", 82/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
83 "Lock/unlock cache (16)", "Write same (16)", NULL, 83 "Lock/unlock cache(16)", "Write same(16)", NULL,
84/* 95-99 */ NULL, NULL, NULL, NULL, NULL, 84/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
85/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", 85/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)",
86 "Service action out (16)", 86 "Service action out(16)",
87/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", 87/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
88 "Maintenance out", "Move medium/play audio(12)", 88 "Security protocol in", "Maintenance in", "Maintenance out",
89 "Move medium/play audio(12)",
89/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", 90/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
90 "Play track relative(12)", 91 "Play track relative(12)",
91/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", 92/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
@@ -93,12 +94,12 @@ static const char * cdb_byte0_names[] = {
93/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", 94/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
94/* b2-b4 */ "Search data low(12)", "Set limits(12)", 95/* b2-b4 */ "Search data low(12)", "Set limits(12)",
95 "Read element status attached", 96 "Read element status attached",
96/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", 97/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming",
97/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", 98/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
98/* ba-bc */ "Redundancy group (in), Scan", 99/* ba-bc */ "Redundancy group (in), Scan",
99 "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", 100 "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
100/* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", 101/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
101 "Volume set out, Send DVD structure", 102 "Volume set (out), Send DVD structure",
102}; 103};
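cdb_byte0_names[] is indexed directly by the first CDB byte; NULL slots are opcodes with no standard name. A guarded lookup sketch (array length inferred from the 00-bf index comments):

const char *name = NULL;
if (cdb0 < ARRAY_SIZE(cdb_byte0_names))
	name = cdb_byte0_names[cdb0];	/* may still be NULL (unnamed opcode) */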
103 104
104struct value_name_pair { 105struct value_name_pair {
@@ -113,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = {
113 {0xc, "Report supported operation codes"}, 114 {0xc, "Report supported operation codes"},
114 {0xd, "Report supported task management functions"}, 115 {0xd, "Report supported task management functions"},
115 {0xe, "Report priority"}, 116 {0xe, "Report priority"},
117 {0xf, "Report timestamp"},
116}; 118};
117#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) 119#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
118 120
@@ -121,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = {
121 {0xa, "Set target port groups"}, 123 {0xa, "Set target port groups"},
122 {0xb, "Change aliases"}, 124 {0xb, "Change aliases"},
123 {0xe, "Set priority"}, 125 {0xe, "Set priority"},
	126	{0xf, "Set timestamp"},
124}; 127};
125#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) 128#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
126 129
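The maintenance in/out tables pair a service-action value with a display name, so they are searched rather than indexed. A minimal scan sketch, assuming value_name_pair holds an int value and a const char *name (the file's real lookup helper may differ):

static const char *get_value_name(const struct value_name_pair *arr,
				  int sz, int value)
{
	int i;

	for (i = 0; i < sz; i++)
		if (arr[i].value == value)
			return arr[i].name;
	return NULL;	/* unknown service action */
}

For example, get_value_name(maint_in_arr, MAINT_IN_SZ, 0xf) would return "Report timestamp" after this patch.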
@@ -428,6 +431,7 @@ static struct error_info additional[] =
428 {0x001A, "Rewind operation in progress"}, 431 {0x001A, "Rewind operation in progress"},
429 {0x001B, "Set capacity operation in progress"}, 432 {0x001B, "Set capacity operation in progress"},
430 {0x001C, "Verify operation in progress"}, 433 {0x001C, "Verify operation in progress"},
434 {0x001D, "ATA pass through information available"},
431 435
432 {0x0100, "No index/sector signal"}, 436 {0x0100, "No index/sector signal"},
433 437
@@ -439,7 +443,7 @@ static struct error_info additional[] =
439 443
440 {0x0400, "Logical unit not ready, cause not reportable"}, 444 {0x0400, "Logical unit not ready, cause not reportable"},
441 {0x0401, "Logical unit is in process of becoming ready"}, 445 {0x0401, "Logical unit is in process of becoming ready"},
442 {0x0402, "Logical unit not ready, initializing cmd. required"}, 446 {0x0402, "Logical unit not ready, initializing command required"},
443 {0x0403, "Logical unit not ready, manual intervention required"}, 447 {0x0403, "Logical unit not ready, manual intervention required"},
444 {0x0404, "Logical unit not ready, format in progress"}, 448 {0x0404, "Logical unit not ready, format in progress"},
445 {0x0405, "Logical unit not ready, rebuild in progress"}, 449 {0x0405, "Logical unit not ready, rebuild in progress"},
@@ -479,6 +483,9 @@ static struct error_info additional[] =
479 {0x0B00, "Warning"}, 483 {0x0B00, "Warning"},
480 {0x0B01, "Warning - specified temperature exceeded"}, 484 {0x0B01, "Warning - specified temperature exceeded"},
481 {0x0B02, "Warning - enclosure degraded"}, 485 {0x0B02, "Warning - enclosure degraded"},
486 {0x0B03, "Warning - background self-test failed"},
487 {0x0B04, "Warning - background pre-scan detected medium error"},
488 {0x0B05, "Warning - background medium scan detected medium error"},
482 489
483 {0x0C00, "Write error"}, 490 {0x0C00, "Write error"},
484 {0x0C01, "Write error - recovered with auto reallocation"}, 491 {0x0C01, "Write error - recovered with auto reallocation"},
@@ -494,6 +501,7 @@ static struct error_info additional[] =
494 {0x0C0B, "Auxiliary memory write error"}, 501 {0x0C0B, "Auxiliary memory write error"},
495 {0x0C0C, "Write error - unexpected unsolicited data"}, 502 {0x0C0C, "Write error - unexpected unsolicited data"},
496 {0x0C0D, "Write error - not enough unsolicited data"}, 503 {0x0C0D, "Write error - not enough unsolicited data"},
504 {0x0C0F, "Defects in error window"},
497 505
498 {0x0D00, "Error detected by third party temporary initiator"}, 506 {0x0D00, "Error detected by third party temporary initiator"},
499 {0x0D01, "Third party device failure"}, 507 {0x0D01, "Third party device failure"},
@@ -505,11 +513,12 @@ static struct error_info additional[] =
505 {0x0E00, "Invalid information unit"}, 513 {0x0E00, "Invalid information unit"},
506 {0x0E01, "Information unit too short"}, 514 {0x0E01, "Information unit too short"},
507 {0x0E02, "Information unit too long"}, 515 {0x0E02, "Information unit too long"},
516 {0x0E03, "Invalid field in command information unit"},
508 517
509 {0x1000, "Id CRC or ECC error"}, 518 {0x1000, "Id CRC or ECC error"},
510 {0x1001, "Data block guard check failed"}, 519 {0x1001, "Logical block guard check failed"},
511 {0x1002, "Data block application tag check failed"}, 520 {0x1002, "Logical block application tag check failed"},
512 {0x1003, "Data block reference tag check failed"}, 521 {0x1003, "Logical block reference tag check failed"},
513 522
514 {0x1100, "Unrecovered read error"}, 523 {0x1100, "Unrecovered read error"},
515 {0x1101, "Read retries exhausted"}, 524 {0x1101, "Read retries exhausted"},
@@ -531,6 +540,7 @@ static struct error_info additional[] =
531 {0x1111, "Read error - loss of streaming"}, 540 {0x1111, "Read error - loss of streaming"},
532 {0x1112, "Auxiliary memory read error"}, 541 {0x1112, "Auxiliary memory read error"},
533 {0x1113, "Read error - failed retransmission request"}, 542 {0x1113, "Read error - failed retransmission request"},
543 {0x1114, "Read error - lba marked bad by application client"},
534 544
535 {0x1200, "Address mark not found for id field"}, 545 {0x1200, "Address mark not found for id field"},
536 546
@@ -611,11 +621,14 @@ static struct error_info additional[] =
611 {0x2100, "Logical block address out of range"}, 621 {0x2100, "Logical block address out of range"},
612 {0x2101, "Invalid element address"}, 622 {0x2101, "Invalid element address"},
613 {0x2102, "Invalid address for write"}, 623 {0x2102, "Invalid address for write"},
624 {0x2103, "Invalid write crossing layer jump"},
614 625
615 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, 626 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
616 627
617 {0x2400, "Invalid field in cdb"}, 628 {0x2400, "Invalid field in cdb"},
618 {0x2401, "CDB decryption error"}, 629 {0x2401, "CDB decryption error"},
630 {0x2402, "Obsolete"},
631 {0x2403, "Obsolete"},
619 {0x2404, "Security audit value frozen"}, 632 {0x2404, "Security audit value frozen"},
620 {0x2405, "Security working key frozen"}, 633 {0x2405, "Security working key frozen"},
621 {0x2406, "Nonce not unique"}, 634 {0x2406, "Nonce not unique"},
@@ -638,7 +651,10 @@ static struct error_info additional[] =
638 {0x260C, "Invalid operation for copy source or destination"}, 651 {0x260C, "Invalid operation for copy source or destination"},
639 {0x260D, "Copy segment granularity violation"}, 652 {0x260D, "Copy segment granularity violation"},
640 {0x260E, "Invalid parameter while port is enabled"}, 653 {0x260E, "Invalid parameter while port is enabled"},
641 {0x260F, "Invalid data-out buffer integrity"}, 654 {0x260F, "Invalid data-out buffer integrity check value"},
655 {0x2610, "Data decryption key fail limit reached"},
656 {0x2611, "Incomplete key-associated data set"},
657 {0x2612, "Vendor specific key reference not found"},
642 658
643 {0x2700, "Write protected"}, 659 {0x2700, "Write protected"},
644 {0x2701, "Hardware write protected"}, 660 {0x2701, "Hardware write protected"},
@@ -650,6 +666,7 @@ static struct error_info additional[] =
650 666
651 {0x2800, "Not ready to ready change, medium may have changed"}, 667 {0x2800, "Not ready to ready change, medium may have changed"},
652 {0x2801, "Import or export element accessed"}, 668 {0x2801, "Import or export element accessed"},
669 {0x2802, "Format-layer may have changed"},
653 670
654 {0x2900, "Power on, reset, or bus device reset occurred"}, 671 {0x2900, "Power on, reset, or bus device reset occurred"},
655 {0x2901, "Power on occurred"}, 672 {0x2901, "Power on occurred"},
@@ -670,6 +687,11 @@ static struct error_info additional[] =
670 {0x2A07, "Implicit asymmetric access state transition failed"}, 687 {0x2A07, "Implicit asymmetric access state transition failed"},
671 {0x2A08, "Priority changed"}, 688 {0x2A08, "Priority changed"},
672 {0x2A09, "Capacity data has changed"}, 689 {0x2A09, "Capacity data has changed"},
690 {0x2A10, "Timestamp changed"},
691 {0x2A11, "Data encryption parameters changed by another i_t nexus"},
692 {0x2A12, "Data encryption parameters changed by vendor specific "
693 "event"},
694 {0x2A13, "Data encryption key instance counter has changed"},
673 695
674 {0x2B00, "Copy cannot execute since host cannot disconnect"}, 696 {0x2B00, "Copy cannot execute since host cannot disconnect"},
675 697
@@ -691,6 +713,7 @@ static struct error_info additional[] =
691 {0x2E00, "Insufficient time for operation"}, 713 {0x2E00, "Insufficient time for operation"},
692 714
693 {0x2F00, "Commands cleared by another initiator"}, 715 {0x2F00, "Commands cleared by another initiator"},
716 {0x2F01, "Commands cleared by power loss notification"},
694 717
695 {0x3000, "Incompatible medium installed"}, 718 {0x3000, "Incompatible medium installed"},
696 {0x3001, "Cannot read medium - unknown format"}, 719 {0x3001, "Cannot read medium - unknown format"},
@@ -703,7 +726,8 @@ static struct error_info additional[] =
703 {0x3008, "Cannot write - application code mismatch"}, 726 {0x3008, "Cannot write - application code mismatch"},
704 {0x3009, "Current session not fixated for append"}, 727 {0x3009, "Current session not fixated for append"},
705 {0x300A, "Cleaning request rejected"}, 728 {0x300A, "Cleaning request rejected"},
706 {0x300C, "WORM medium, overwrite attempted"}, 729 {0x300C, "WORM medium - overwrite attempted"},
730 {0x300D, "WORM medium - integrity check"},
707 {0x3010, "Medium not formatted"}, 731 {0x3010, "Medium not formatted"},
708 732
709 {0x3100, "Medium format corrupted"}, 733 {0x3100, "Medium format corrupted"},
@@ -791,6 +815,9 @@ static struct error_info additional[] =
791 {0x3F0F, "Echo buffer overwritten"}, 815 {0x3F0F, "Echo buffer overwritten"},
792 {0x3F10, "Medium loadable"}, 816 {0x3F10, "Medium loadable"},
793 {0x3F11, "Medium auxiliary memory accessible"}, 817 {0x3F11, "Medium auxiliary memory accessible"},
818 {0x3F12, "iSCSI IP address added"},
819 {0x3F13, "iSCSI IP address removed"},
820 {0x3F14, "iSCSI IP address changed"},
794/* 821/*
795 * {0x40NN, "Ram failure"}, 822 * {0x40NN, "Ram failure"},
796 * {0x40NN, "Diagnostic failure on component nn"}, 823 * {0x40NN, "Diagnostic failure on component nn"},
@@ -800,6 +827,7 @@ static struct error_info additional[] =
800 {0x4300, "Message error"}, 827 {0x4300, "Message error"},
801 828
802 {0x4400, "Internal target failure"}, 829 {0x4400, "Internal target failure"},
830 {0x4471, "ATA device failed set features"},
803 831
804 {0x4500, "Select or reselect failure"}, 832 {0x4500, "Select or reselect failure"},
805 833
@@ -808,9 +836,10 @@ static struct error_info additional[] =
808 {0x4700, "Scsi parity error"}, 836 {0x4700, "Scsi parity error"},
809 {0x4701, "Data phase CRC error detected"}, 837 {0x4701, "Data phase CRC error detected"},
810 {0x4702, "Scsi parity error detected during st data phase"}, 838 {0x4702, "Scsi parity error detected during st data phase"},
811 {0x4703, "Information unit CRC error detected"}, 839 {0x4703, "Information unit iuCRC error detected"},
812 {0x4704, "Asynchronous information protection error detected"}, 840 {0x4704, "Asynchronous information protection error detected"},
813 {0x4705, "Protocol service CRC error"}, 841 {0x4705, "Protocol service CRC error"},
842 {0x4706, "Phy test function in progress"},
814 {0x477f, "Some commands cleared by iSCSI Protocol event"}, 843 {0x477f, "Some commands cleared by iSCSI Protocol event"},
815 844
816 {0x4800, "Initiator detected error message received"}, 845 {0x4800, "Initiator detected error message received"},
@@ -845,6 +874,8 @@ static struct error_info additional[] =
845 {0x5300, "Media load or eject failed"}, 874 {0x5300, "Media load or eject failed"},
846 {0x5301, "Unload tape failure"}, 875 {0x5301, "Unload tape failure"},
847 {0x5302, "Medium removal prevented"}, 876 {0x5302, "Medium removal prevented"},
877 {0x5303, "Medium removal prevented by data transfer element"},
878 {0x5304, "Medium thread or unthread failure"},
848 879
849 {0x5400, "Scsi to host system interface failure"}, 880 {0x5400, "Scsi to host system interface failure"},
850 881
@@ -856,6 +887,7 @@ static struct error_info additional[] =
856 {0x5505, "Insufficient access control resources"}, 887 {0x5505, "Insufficient access control resources"},
857 {0x5506, "Auxiliary memory out of space"}, 888 {0x5506, "Auxiliary memory out of space"},
858 {0x5507, "Quota error"}, 889 {0x5507, "Quota error"},
890 {0x5508, "Maximum number of supplemental decryption keys exceeded"},
859 891
860 {0x5700, "Unable to recover table-of-contents"}, 892 {0x5700, "Unable to recover table-of-contents"},
861 893
@@ -1005,6 +1037,7 @@ static struct error_info additional[] =
1005 {0x6708, "Assign failure occurred"}, 1037 {0x6708, "Assign failure occurred"},
1006 {0x6709, "Multiply assigned logical unit"}, 1038 {0x6709, "Multiply assigned logical unit"},
1007 {0x670A, "Set target port groups command failed"}, 1039 {0x670A, "Set target port groups command failed"},
1040 {0x670B, "ATA device feature not enabled"},
1008 1041
1009 {0x6800, "Logical unit not configured"}, 1042 {0x6800, "Logical unit not configured"},
1010 1043
@@ -1031,6 +1064,8 @@ static struct error_info additional[] =
1031 {0x6F03, "Read of scrambled sector without authentication"}, 1064 {0x6F03, "Read of scrambled sector without authentication"},
1032 {0x6F04, "Media region code is mismatched to logical unit region"}, 1065 {0x6F04, "Media region code is mismatched to logical unit region"},
1033 {0x6F05, "Drive region must be permanent/region reset count error"}, 1066 {0x6F05, "Drive region must be permanent/region reset count error"},
1067 {0x6F06, "Insufficient block count for binding nonce recording"},
1068 {0x6F07, "Conflict in binding nonce recording"},
1034/* 1069/*
1035 * {0x70NN, "Decompression exception short algorithm id of nn"}, 1070 * {0x70NN, "Decompression exception short algorithm id of nn"},
1036 */ 1071 */
@@ -1042,6 +1077,8 @@ static struct error_info additional[] =
1042 {0x7203, "Session fixation error - incomplete track in session"}, 1077 {0x7203, "Session fixation error - incomplete track in session"},
1043 {0x7204, "Empty or partially written reserved track"}, 1078 {0x7204, "Empty or partially written reserved track"},
1044 {0x7205, "No more track reservations allowed"}, 1079 {0x7205, "No more track reservations allowed"},
1080 {0x7206, "RMZ extension is not allowed"},
1081 {0x7207, "No more test zone extensions are allowed"},
1045 1082
1046 {0x7300, "Cd control error"}, 1083 {0x7300, "Cd control error"},
1047 {0x7301, "Power calibration area almost full"}, 1084 {0x7301, "Power calibration area almost full"},
@@ -1050,6 +1087,18 @@ static struct error_info additional[] =
1050 {0x7304, "Program memory area update failure"}, 1087 {0x7304, "Program memory area update failure"},
1051 {0x7305, "Program memory area is full"}, 1088 {0x7305, "Program memory area is full"},
1052 {0x7306, "RMA/PMA is almost full"}, 1089 {0x7306, "RMA/PMA is almost full"},
1090 {0x7310, "Current power calibration area almost full"},
1091 {0x7311, "Current power calibration area is full"},
1092 {0x7317, "RDZ is full"},
1093
1094 {0x7400, "Security error"},
1095 {0x7401, "Unable to decrypt data"},
1096 {0x7402, "Unencrypted data encountered while decrypting"},
1097 {0x7403, "Incorrect data encryption key"},
1098 {0x7404, "Cryptographic integrity validation failed"},
1099 {0x7405, "Error decrypting data"},
1100 {0x7471, "Logical unit access not authorized"},
1101
1053 {0, NULL} 1102 {0, NULL}
1054}; 1103};
1055 1104
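The entries added above extend additional[], the sentinel-terminated table mapping a combined (ASC << 8) | ASCQ code to its SPC description text. A minimal sketch of how such a table is scanned; the helper name here is ours, not the kernel's:

#include <stddef.h>

struct error_info {
	unsigned short code12;	/* (ASC << 8) | ASCQ */
	const char *text;
};

/* Hypothetical lookup helper: walks the {0, NULL}-terminated table.
 * A linear scan is fine since it only runs on error paths. */
static const char *lookup_asc_ascq(const struct error_info *tbl,
				   unsigned char asc, unsigned char ascq)
{
	unsigned short code = (asc << 8) | ascq;

	for (; tbl->text != NULL; tbl++)
		if (tbl->code12 == code)
			return tbl->text;
	return NULL;	/* caller falls back to printing raw asc/ascq */
}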
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
index a4a4fac5c0a1..7c7cfb54e897 100644
--- a/drivers/scsi/cyberstorm.c
+++ b/drivers/scsi/cyberstorm.c
@@ -172,7 +172,7 @@ int __init cyber_esp_detect(struct scsi_host_template *tpnt)
172 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 172 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
173 173
174 esp->irq = IRQ_AMIGA_PORTS; 174 esp->irq = IRQ_AMIGA_PORTS;
175 request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ, 175 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
176 "CyberStorm SCSI", esp->ehost); 176 "CyberStorm SCSI", esp->ehost);
177 /* Figure out our scsi ID on the bus */ 177 /* Figure out our scsi ID on the bus */
178 /* The DMA cond flag contains a hardcoded jumper bit 178 /* The DMA cond flag contains a hardcoded jumper bit
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
index 3a803d73bc5f..d88cb9cf091e 100644
--- a/drivers/scsi/cyberstormII.c
+++ b/drivers/scsi/cyberstormII.c
@@ -153,7 +153,7 @@ int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
153 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 153 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
154 154
155 esp->irq = IRQ_AMIGA_PORTS; 155 esp->irq = IRQ_AMIGA_PORTS;
156 request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ, 156 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
157 "CyberStorm SCSI Mk II", esp->ehost); 157 "CyberStorm SCSI Mk II", esp->ehost);
158 158
159 /* Figure out our scsi ID on the bus */ 159 /* Figure out our scsi ID on the bus */
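The request_irq() changes in this and the following hunks are a mechanical rename: the old SA_SHIRQ and SA_INTERRUPT flags became IRQF_SHARED and IRQF_DISABLED in 2.6.18 with unchanged semantics. A minimal sketch of the new spelling, with illustrative names:

#include <linux/interrupt.h>

static irqreturn_t example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... service the device ... */
	return IRQ_HANDLED;
}

static int example_attach(unsigned int irq, void *dev)
{
	/* Old spelling: SA_SHIRQ | SA_INTERRUPT.  New spelling, same
	 * semantics: the line may be shared, and the handler runs
	 * with local interrupts disabled. */
	return request_irq(irq, example_intr, IRQF_SHARED | IRQF_DISABLED,
			   "example", dev);
}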
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 183245254931..ff2b1796fa34 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3771,7 +3771,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3771 * @target: The target for the new device. 3771 * @target: The target for the new device.
3772 * @lun: The lun for the new device. 3772 * @lun: The lun for the new device.
3773 * 3773 *
3774 * Return the new device if succesfull or NULL on failure. 3774 * Return the new device if successful or NULL on failure.
3775 **/ 3775 **/
3776static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, 3776static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
3777 u8 target, u8 lun) 3777 u8 target, u8 lun)
@@ -4562,7 +4562,7 @@ static int __devinit adapter_init(struct AdapterCtlBlk *acb,
4562 acb->io_port_base = io_port; 4562 acb->io_port_base = io_port;
4563 acb->io_port_len = io_port_len; 4563 acb->io_port_len = io_port_len;
4564 4564
4565 if (request_irq(irq, dc395x_interrupt, SA_SHIRQ, DC395X_NAME, acb)) { 4565 if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
4566 /* release the region we just claimed */ 4566 /* release the region we just claimed */
4567 dprintkl(KERN_INFO, "Failed to register IRQ\n"); 4567 dprintkl(KERN_INFO, "Failed to register IRQ\n");
4568 goto failed; 4568 goto failed;
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
index a35ee43a48df..eb32062f7e68 100644
--- a/drivers/scsi/dec_esp.c
+++ b/drivers/scsi/dec_esp.c
@@ -202,19 +202,19 @@ static int dec_esp_detect(struct scsi_host_template * tpnt)
202 202
203 esp_initialize(esp); 203 esp_initialize(esp);
204 204
205 if (request_irq(esp->irq, esp_intr, SA_INTERRUPT, 205 if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
206 "ncr53c94", esp->ehost)) 206 "ncr53c94", esp->ehost))
207 goto err_dealloc; 207 goto err_dealloc;
208 if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR], 208 if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
209 scsi_dma_merr_int, SA_INTERRUPT, 209 scsi_dma_merr_int, IRQF_DISABLED,
210 "ncr53c94 error", esp->ehost)) 210 "ncr53c94 error", esp->ehost))
211 goto err_free_irq; 211 goto err_free_irq;
212 if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR], 212 if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
213 scsi_dma_err_int, SA_INTERRUPT, 213 scsi_dma_err_int, IRQF_DISABLED,
214 "ncr53c94 overrun", esp->ehost)) 214 "ncr53c94 overrun", esp->ehost))
215 goto err_free_irq_merr; 215 goto err_free_irq_merr;
216 if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], 216 if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA],
217 scsi_dma_int, SA_INTERRUPT, 217 scsi_dma_int, IRQF_DISABLED,
218 "ncr53c94 dma", esp->ehost)) 218 "ncr53c94 dma", esp->ehost))
219 goto err_free_irq_err; 219 goto err_free_irq_err;
220 220
@@ -276,7 +276,7 @@ static int dec_esp_detect(struct scsi_host_template * tpnt)
276 esp->dma_mmu_release_scsi_sgl = 0; 276 esp->dma_mmu_release_scsi_sgl = 0;
277 esp->dma_advance_sg = 0; 277 esp->dma_advance_sg = 0;
278 278
279 if (request_irq(esp->irq, esp_intr, SA_INTERRUPT, 279 if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
280 "PMAZ_AA", esp->ehost)) { 280 "PMAZ_AA", esp->ehost)) {
281 esp_deallocate(esp); 281 esp_deallocate(esp);
282 release_tc_card(slot); 282 release_tc_card(slot);
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 38e4010eff96..879a26657676 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -94,7 +94,7 @@ static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
94 94
95 NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); 95 NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);
96 96
97 if (request_irq(pdev->irq, NCR5380_intr, SA_SHIRQ, 97 if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED,
98 DMX3191D_DRIVER_NAME, shost)) { 98 DMX3191D_DRIVER_NAME, shost)) {
99 /* 99 /*
100 * Steam powered scsi controllers run without an IRQ anyway 100 * Steam powered scsi controllers run without an IRQ anyway
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index a9585f5235d9..d84a281ad944 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -23,7 +23,6 @@
23 23
24#include <asm/semaphore.h> /* Needed for MUTEX init macros */ 24#include <asm/semaphore.h> /* Needed for MUTEX init macros */
25#include <linux/version.h> 25#include <linux/version.h>
26#include <linux/config.h>
27#include <linux/notifier.h> 26#include <linux/notifier.h>
28#include <asm/atomic.h> 27#include <asm/atomic.h>
29 28
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b1b704a42efd..7b3bd34faf47 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -46,7 +46,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
46 46
47#include <linux/stat.h> 47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */ 48#include <linux/slab.h> /* for kmalloc() */
49#include <linux/config.h> /* for CONFIG_PCI */
50#include <linux/pci.h> /* for PCI support */ 49#include <linux/pci.h> /* for PCI support */
51#include <linux/proc_fs.h> 50#include <linux/proc_fs.h>
52#include <linux/blkdev.h> 51#include <linux/blkdev.h>
@@ -185,7 +184,7 @@ static int adpt_detect(struct scsi_host_template* sht)
185 PINFO("Detecting Adaptec I2O RAID controllers...\n"); 184 PINFO("Detecting Adaptec I2O RAID controllers...\n");
186 185
187 /* search for all Adaptec I2O RAID cards */ 186 /* search for all Adaptec I2O RAID cards */
188 while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) { 187 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
189 if(pDev->device == PCI_DPT_DEVICE_ID || 188 if(pDev->device == PCI_DPT_DEVICE_ID ||
190 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){ 189 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
191 if(adpt_install_hba(sht, pDev) ){ 190 if(adpt_install_hba(sht, pDev) ){
@@ -193,8 +192,11 @@ static int adpt_detect(struct scsi_host_template* sht)
193 PERROR("Will not try to detect others.\n"); 192 PERROR("Will not try to detect others.\n");
194 return hba_count-1; 193 return hba_count-1;
195 } 194 }
195 pci_dev_get(pDev);
196 } 196 }
197 } 197 }
198 if (pDev)
199 pci_dev_put(pDev);
198 200
199 /* In INIT state, Activate IOPs */ 201 /* In INIT state, Activate IOPs */
200 for (pHba = hba_chain; pHba; pHba = pHba->next) { 202 for (pHba = hba_chain; pHba; pHba = pHba->next) {
@@ -1009,7 +1011,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
1009 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size); 1011 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1010 } 1012 }
1011 1013
1012 if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) { 1014 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1013 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq); 1015 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1014 adpt_i2o_delete_hba(pHba); 1016 adpt_i2o_delete_hba(pHba);
1015 return -EINVAL; 1017 return -EINVAL;
@@ -1076,6 +1078,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1076 } 1078 }
1077 } 1079 }
1078 } 1080 }
1081 pci_dev_put(pHba->pDev);
1079 kfree(pHba); 1082 kfree(pHba);
1080 1083
1081 if(hba_count <= 0){ 1084 if(hba_count <= 0){
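pci_find_device() hands back an unreferenced device and is unsafe once devices can be hot-unplugged; pci_get_device() returns the next match with its refcount held and drops the reference on the previous iterator, so the hunk above takes an extra reference for each adapter it keeps and releases it in adpt_i2o_delete_hba(). A sketch of the pattern under those assumptions; keep_device() and scan_adapters() are placeholders:

#include <linux/pci.h>

static int keep_device(struct pci_dev *pdev)
{
	return pdev->vendor == PCI_VENDOR_ID_DPT;	/* illustrative test */
}

static void scan_adapters(void)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() returns the next match with its refcount
	 * raised and drops the reference on the previous iterator. */
	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (keep_device(pdev))
			pci_dev_get(pdev); /* extra ref for the one we keep */
	}
	/* Loop ran to NULL: no iterator reference left to drop.  Each
	 * kept device is released later with pci_dev_put(). */
}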
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index c5108c8c887b..0d5713dfa204 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -280,7 +280,7 @@ found:
280 /* With interrupts enabled, it will sometimes hang when doing heavy 280 /* With interrupts enabled, it will sometimes hang when doing heavy
281 * reads. So better not enable them until I figure it out. */ 281 * reads. So better not enable them until I figure it out. */
282 if (instance->irq != SCSI_IRQ_NONE) 282 if (instance->irq != SCSI_IRQ_NONE)
283 if (request_irq(instance->irq, dtc_intr, SA_INTERRUPT, "dtc", instance)) { 283 if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, "dtc", instance)) {
284 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); 284 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
285 instance->irq = SCSI_IRQ_NONE; 285 instance->irq = SCSI_IRQ_NONE;
286 } 286 }
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 059eeee4b554..a5ff43b1b263 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -477,7 +477,6 @@
477 * the driver sets host->wish_block = 1 for all ISA boards. 477 * the driver sets host->wish_block = 1 for all ISA boards.
478 */ 478 */
479 479
480#include <linux/config.h>
481#include <linux/string.h> 480#include <linux/string.h>
482#include <linux/kernel.h> 481#include <linux/kernel.h>
483#include <linux/ioport.h> 482#include <linux/ioport.h>
@@ -1222,7 +1221,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
1222 1221
1223 /* Board detected, allocate its IRQ */ 1222 /* Board detected, allocate its IRQ */
1224 if (request_irq(irq, do_interrupt_handler, 1223 if (request_irq(irq, do_interrupt_handler,
1225 SA_INTERRUPT | ((subversion == ESA) ? SA_SHIRQ : 0), 1224 IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
1226 driver_name, (void *)&sha[j])) { 1225 driver_name, (void *)&sha[j])) {
1227 printk("%s: unable to allocate IRQ %u, detaching.\n", name, 1226 printk("%s: unable to allocate IRQ %u, detaching.\n", name,
1228 irq); 1227 irq);
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
index 34bce2c9e92e..635c14861f86 100644
--- a/drivers/scsi/eata_generic.h
+++ b/drivers/scsi/eata_generic.h
@@ -364,6 +364,7 @@ typedef struct hstd {
364 __u8 moresupport; /* HBA supports MORE flag */ 364 __u8 moresupport; /* HBA supports MORE flag */
365 struct Scsi_Host *next; 365 struct Scsi_Host *next;
366 struct Scsi_Host *prev; 366 struct Scsi_Host *prev;
367 struct pci_dev *pdev; /* PCI device or NULL for non PCI */
367 struct eata_sp sp; /* status packet */ 368 struct eata_sp sp; /* status packet */
368 struct eata_ccb ccb[0]; /* ccb array begins here */ 369 struct eata_ccb ccb[0]; /* ccb array begins here */
369}hostdata; 370}hostdata;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 23beb48c79c5..d312633db92b 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -46,7 +46,6 @@
46 * last change: 2002/11/02 OS: Linux 2.5.45 * 46 * last change: 2002/11/02 OS: Linux 2.5.45 *
47 ************************************************************/ 47 ************************************************************/
48 48
49#include <linux/config.h>
50#include <linux/module.h> 49#include <linux/module.h>
51#include <linux/kernel.h> 50#include <linux/kernel.h>
52#include <linux/sched.h> 51#include <linux/sched.h>
@@ -72,11 +71,11 @@
72#include "eata_pio.h" 71#include "eata_pio.h"
73 72
74 73
75static uint ISAbases[MAXISA] = { 74static unsigned int ISAbases[MAXISA] = {
76 0x1F0, 0x170, 0x330, 0x230 75 0x1F0, 0x170, 0x330, 0x230
77}; 76};
78 77
79static uint ISAirqs[MAXISA] = { 78static unsigned int ISAirqs[MAXISA] = {
80 14, 12, 15, 11 79 14, 12, 15, 11
81}; 80};
82 81
@@ -85,7 +84,7 @@ static unsigned char EISAbases[] = {
85 1, 1, 1, 1, 1, 1, 1, 1 84 1, 1, 1, 1, 1, 1, 1, 1
86}; 85};
87 86
88static uint registered_HBAs; 87static unsigned int registered_HBAs;
89static struct Scsi_Host *last_HBA; 88static struct Scsi_Host *last_HBA;
90static struct Scsi_Host *first_HBA; 89static struct Scsi_Host *first_HBA;
91static unsigned char reg_IRQ[16]; 90static unsigned char reg_IRQ[16];
@@ -166,6 +165,7 @@ static int eata_pio_proc_info(struct Scsi_Host *shost, char *buffer, char **star
166 165
167static int eata_pio_release(struct Scsi_Host *sh) 166static int eata_pio_release(struct Scsi_Host *sh)
168{ 167{
168 hostdata *hd = SD(sh);
169 if (sh->irq && reg_IRQ[sh->irq] == 1) 169 if (sh->irq && reg_IRQ[sh->irq] == 1)
170 free_irq(sh->irq, NULL); 170 free_irq(sh->irq, NULL);
171 else 171 else
@@ -174,10 +174,13 @@ static int eata_pio_release(struct Scsi_Host *sh)
174 if (sh->io_port && sh->n_io_port) 174 if (sh->io_port && sh->n_io_port)
175 release_region(sh->io_port, sh->n_io_port); 175 release_region(sh->io_port, sh->n_io_port);
176 } 176 }
177 /* At this point the PCI reference can go */
178 if (hd->pdev)
179 pci_dev_put(hd->pdev);
177 return 1; 180 return 1;
178} 181}
179 182
180static void IncStat(struct scsi_pointer *SCp, uint Increment) 183static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
181{ 184{
182 SCp->ptr += Increment; 185 SCp->ptr += Increment;
183 if ((SCp->this_residual -= Increment) == 0) { 186 if ((SCp->this_residual -= Increment) == 0) {
@@ -191,46 +194,49 @@ static void IncStat(struct scsi_pointer *SCp, uint Increment)
191 } 194 }
192} 195}
193 196
194static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs); 197static irqreturn_t eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs);
195 198
196static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id, 199static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id,
197 struct pt_regs *regs) 200 struct pt_regs *regs)
198{ 201{
199 unsigned long flags; 202 unsigned long flags;
200 struct Scsi_Host *dev = dev_id; 203 struct Scsi_Host *dev = dev_id;
204 irqreturn_t ret;
201 205
202 spin_lock_irqsave(dev->host_lock, flags); 206 spin_lock_irqsave(dev->host_lock, flags);
203 eata_pio_int_handler(irq, dev_id, regs); 207 ret = eata_pio_int_handler(irq, dev_id, regs);
204 spin_unlock_irqrestore(dev->host_lock, flags); 208 spin_unlock_irqrestore(dev->host_lock, flags);
205 return IRQ_HANDLED; 209 return ret;
206} 210}
207 211
208static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs) 212static irqreturn_t eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
209{ 213{
210 uint eata_stat = 0xfffff; 214 unsigned int eata_stat = 0xfffff;
211 struct scsi_cmnd *cmd; 215 struct scsi_cmnd *cmd;
212 hostdata *hd; 216 hostdata *hd;
213 struct eata_ccb *cp; 217 struct eata_ccb *cp;
214 uint base; 218 unsigned long base;
215 uint x, z; 219 unsigned int x, z;
216 struct Scsi_Host *sh; 220 struct Scsi_Host *sh;
217 unsigned short zwickel = 0; 221 unsigned short zwickel = 0;
218 unsigned char stat, odd; 222 unsigned char stat, odd;
223 irqreturn_t ret = IRQ_NONE;
219 224
220 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) 225 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev)
221 { 226 {
222 if (sh->irq != irq) 227 if (sh->irq != irq)
223 continue; 228 continue;
224 if (inb((uint) sh->base + HA_RSTATUS) & HA_SBUSY) 229 if (inb(sh->base + HA_RSTATUS) & HA_SBUSY)
225 continue; 230 continue;
226 231
227 int_counter++; 232 int_counter++;
233 ret = IRQ_HANDLED;
228 234
229 hd = SD(sh); 235 hd = SD(sh);
230 236
231 cp = &hd->ccb[0]; 237 cp = &hd->ccb[0];
232 cmd = cp->cmd; 238 cmd = cp->cmd;
233 base = (uint) cmd->device->host->base; 239 base = cmd->device->host->base;
234 240
235 do { 241 do {
236 stat = inb(base + HA_RSTATUS); 242 stat = inb(base + HA_RSTATUS);
@@ -305,7 +311,7 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
305 if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { 311 if (!(inb(base + HA_RSTATUS) & HA_SERROR)) {
306 cmd->result = (DID_OK << 16); 312 cmd->result = (DID_OK << 16);
307 hd->devflags |= (1 << cp->cp_id); 313 hd->devflags |= (1 << cp->cp_id);
308 } else if (hd->devflags & 1 << cp->cp_id) 314 } else if (hd->devflags & (1 << cp->cp_id))
309 cmd->result = (DID_OK << 16) + 0x02; 315 cmd->result = (DID_OK << 16) + 0x02;
310 else 316 else
311 cmd->result = (DID_NO_CONNECT << 16); 317 cmd->result = (DID_NO_CONNECT << 16);
@@ -314,7 +320,7 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
314 cp->status = FREE; 320 cp->status = FREE;
315 eata_stat = inb(base + HA_RSTATUS); 321 eata_stat = inb(base + HA_RSTATUS);
316 printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n"); 322 printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n");
317 return; 323 return ret;
318 } 324 }
319#if DBG_INTR2 325#if DBG_INTR2
320 if (stat != 0x50) 326 if (stat != 0x50)
@@ -326,12 +332,12 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
326 cmd->scsi_done(cmd); 332 cmd->scsi_done(cmd);
327 } 333 }
328 334
329 return; 335 return ret;
330} 336}
331 337
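Converting eata_pio_int_handler() to return irqreturn_t lets the locking wrapper propagate whether the interrupt was actually serviced, which the IRQ core needs for spurious-interrupt detection on shared lines. A minimal sketch of the convention, with illustrative names:

#include <linux/interrupt.h>

static int controller_asserting_irq(void *dev_id)
{
	return dev_id != NULL;	/* stand-in for a real status-register check */
}

static irqreturn_t example_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	if (!controller_asserting_irq(dev_id))
		return IRQ_NONE;	/* not ours; let other sharers run */

	/* ... normal completion processing ... */
	return IRQ_HANDLED;
}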
332static inline uint eata_pio_send_command(uint base, unsigned char command) 338static inline unsigned int eata_pio_send_command(unsigned long base, unsigned char command)
333{ 339{
334 uint loop = HZ / 2; 340 unsigned int loop = 50;
335 341
336 while (inb(base + HA_RSTATUS) & HA_SBUSY) 342 while (inb(base + HA_RSTATUS) & HA_SBUSY)
337 if (--loop == 0) 343 if (--loop == 0)
@@ -350,8 +356,8 @@ static inline uint eata_pio_send_command(uint base, unsigned char command)
350static int eata_pio_queue(struct scsi_cmnd *cmd, 356static int eata_pio_queue(struct scsi_cmnd *cmd,
351 void (*done)(struct scsi_cmnd *)) 357 void (*done)(struct scsi_cmnd *))
352{ 358{
353 uint x, y; 359 unsigned int x, y;
354 uint base; 360 unsigned long base;
355 361
356 hostdata *hd; 362 hostdata *hd;
357 struct Scsi_Host *sh; 363 struct Scsi_Host *sh;
@@ -361,7 +367,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
361 367
362 hd = HD(cmd); 368 hd = HD(cmd);
363 sh = cmd->device->host; 369 sh = cmd->device->host;
364 base = (uint) sh->base; 370 base = sh->base;
365 371
366 /* use only slot 0, as 2001 can handle only one cmd at a time */ 372 /* use only slot 0, as 2001 can handle only one cmd at a time */
367 373
@@ -396,9 +402,9 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
396 cp->DataIn = 0; /* Input mode */ 402 cp->DataIn = 0; /* Input mode */
397 403
398 cp->Interpret = (cmd->device->id == hd->hostid); 404 cp->Interpret = (cmd->device->id == hd->hostid);
399 cp->cp_datalen = htonl((unsigned long) cmd->request_bufflen); 405 cp->cp_datalen = cpu_to_be32(cmd->request_bufflen);
400 cp->Auto_Req_Sen = 0; 406 cp->Auto_Req_Sen = 0;
401 cp->cp_reqDMA = htonl(0); 407 cp->cp_reqDMA = 0;
402 cp->reqlen = 0; 408 cp->reqlen = 0;
403 409
404 cp->cp_id = cmd->device->id; 410 cp->cp_id = cmd->device->id;
@@ -407,7 +413,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
407 cp->cp_identify = 1; 413 cp->cp_identify = 1;
408 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); 414 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
409 415
410 cp->cp_statDMA = htonl(0); 416 cp->cp_statDMA = 0;
411 417
412 cp->cp_viraddr = cp; 418 cp->cp_viraddr = cp;
413 cp->cmd = cmd; 419 cp->cmd = cmd;
@@ -446,14 +452,14 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
446 452
447 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 453 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
448 "Queued base %#.4lx pid: %ld " 454 "Queued base %#.4lx pid: %ld "
449 "slot %d irq %d\n", (long) sh->base, cmd->pid, y, sh->irq)); 455 "slot %d irq %d\n", sh->base, cmd->pid, y, sh->irq));
450 456
451 return (0); 457 return (0);
452} 458}
453 459
454static int eata_pio_abort(struct scsi_cmnd *cmd) 460static int eata_pio_abort(struct scsi_cmnd *cmd)
455{ 461{
456 uint loop = HZ; 462 unsigned int loop = 100;
457 463
458 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 464 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
459 "eata_pio_abort called pid: %ld\n", 465 "eata_pio_abort called pid: %ld\n",
@@ -486,7 +492,7 @@ static int eata_pio_abort(struct scsi_cmnd *cmd)
486 492
487static int eata_pio_host_reset(struct scsi_cmnd *cmd) 493static int eata_pio_host_reset(struct scsi_cmnd *cmd)
488{ 494{
489 uint x, limit = 0; 495 unsigned int x, limit = 0;
490 unsigned char success = 0; 496 unsigned char success = 0;
491 struct scsi_cmnd *sp; 497 struct scsi_cmnd *sp;
492 struct Scsi_Host *host = cmd->device->host; 498 struct Scsi_Host *host = cmd->device->host;
@@ -519,7 +525,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
519 } 525 }
520 526
521 /* hard reset the HBA */ 527 /* hard reset the HBA */
522 outb(EATA_CMD_RESET, (uint) cmd->device->host->base + HA_WCOMMAND); 528 outb(EATA_CMD_RESET, cmd->device->host->base + HA_WCOMMAND);
523 529
524 DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n")); 530 DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n"));
525 HD(cmd)->state = RESET; 531 HD(cmd)->state = RESET;
@@ -559,7 +565,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
559 } 565 }
560} 566}
561 567
562static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned long cplen, unsigned short cppadlen) 568static char *get_pio_board_data(unsigned long base, unsigned int irq, unsigned int id, unsigned long cplen, unsigned short cppadlen)
563{ 569{
564 struct eata_ccb cp; 570 struct eata_ccb cp;
565 static char buff[256]; 571 static char buff[256];
@@ -571,8 +577,8 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
571 cp.DataIn = 1; 577 cp.DataIn = 1;
572 cp.Interpret = 1; /* Interpret command */ 578 cp.Interpret = 1; /* Interpret command */
573 579
574 cp.cp_datalen = htonl(254); 580 cp.cp_datalen = cpu_to_be32(254);
575 cp.cp_dataDMA = htonl(0); 581 cp.cp_dataDMA = cpu_to_be32(0);
576 582
577 cp.cp_id = id; 583 cp.cp_id = id;
578 cp.cp_lun = 0; 584 cp.cp_lun = 0;
@@ -584,7 +590,7 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
584 cp.cp_cdb[4] = 254; 590 cp.cp_cdb[4] = 254;
585 cp.cp_cdb[5] = 0; 591 cp.cp_cdb[5] = 0;
586 592
587 if (eata_pio_send_command((uint) base, EATA_CMD_PIO_SEND_CP)) 593 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
588 return (NULL); 594 return (NULL);
589 while (!(inb(base + HA_RSTATUS) & HA_SDRQ)); 595 while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
590 outsw(base + HA_RDATA, &cp, cplen); 596 outsw(base + HA_RDATA, &cp, cplen);
@@ -605,7 +611,7 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
605 } 611 }
606} 612}
607 613
608static int get_pio_conf_PIO(u32 base, struct get_conf *buf) 614static int get_pio_conf_PIO(unsigned long base, struct get_conf *buf)
609{ 615{
610 unsigned long loop = HZ / 2; 616 unsigned long loop = HZ / 2;
611 int z; 617 int z;
@@ -620,30 +626,30 @@ static int get_pio_conf_PIO(u32 base, struct get_conf *buf)
620 if (--loop == 0) 626 if (--loop == 0)
621 goto fail; 627 goto fail;
622 628
623 DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#x\n", base)); 629 DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#lx\n", base));
624 eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG); 630 eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
625 631
626 loop = HZ / 2; 632 loop = 50;
627 for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) { 633 for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) {
628 while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) 634 while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
629 if (--loop == 0) 635 if (--loop == 0)
630 goto fail; 636 goto fail;
631 637
632 loop = HZ / 2; 638 loop = 50;
633 *p = inw(base + HA_RDATA); 639 *p = inw(base + HA_RDATA);
634 } 640 }
635 if (inb(base + HA_RSTATUS) & HA_SERROR) { 641 if (inb(base + HA_RSTATUS) & HA_SERROR) {
636 DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during " 642 DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during "
637 "transfer for HBA at %x\n", base)); 643 "transfer for HBA at %lx\n", base));
638 goto fail; 644 goto fail;
639 } 645 }
640 646
641 if (htonl(EATA_SIGNATURE) != buf->signature) 647 if (cpu_to_be32(EATA_SIGNATURE) != buf->signature)
642 goto fail; 648 goto fail;
643 649
644 DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found " 650 DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found "
645 "at %#4x EATA Level: %x\n", 651 "at %#4lx EATA Level: %x\n",
646 base, (uint) (buf->version))); 652 base, (unsigned int) (buf->version)));
647 653
648 while (inb(base + HA_RSTATUS) & HA_SDRQ) 654 while (inb(base + HA_RSTATUS) & HA_SDRQ)
649 inw(base + HA_RDATA); 655 inw(base + HA_RDATA);
@@ -666,12 +672,12 @@ static int get_pio_conf_PIO(u32 base, struct get_conf *buf)
666static void print_pio_config(struct get_conf *gc) 672static void print_pio_config(struct get_conf *gc)
667{ 673{
668 printk("Please check values: (read config data)\n"); 674 printk("Please check values: (read config data)\n");
669 printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", (uint) ntohl(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support); 675 printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", be32_to_cpu(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support);
670 printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND); 676 printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], be16_to_cpu(gc->queuesiz), be16_to_cpu(gc->SGsiz), gc->SECOND);
671 printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest); 677 printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest);
672} 678}
673 679
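The htonl()/ntohs() calls replaced above happened to produce the right bytes, since the EATA config block is big-endian, but cpu_to_be32()/be16_to_cpu() state the direction of each conversion and pair with sparse's __be32/__be16 annotations. A sketch, with illustrative field names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_conf {
	__be32 len;		/* stored big-endian by the board */
	__be16 queuesiz;
};

static unsigned int example_queue_size(const struct example_conf *c)
{
	/* be16_to_cpu() documents the direction of the conversion;
	 * ntohs() gave the same bytes but not the same intent, and
	 * sparse cannot type-check it against __be16. */
	return be16_to_cpu(c->queuesiz);
}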
674static uint print_selftest(uint base) 680static unsigned int print_selftest(unsigned int base)
675{ 681{
676 unsigned char buffer[512]; 682 unsigned char buffer[512];
677#ifdef VERBOSE_SETUP 683#ifdef VERBOSE_SETUP
@@ -698,7 +704,7 @@ static uint print_selftest(uint base)
698 return (!(inb(base + HA_RSTATUS) & HA_SERROR)); 704 return (!(inb(base + HA_RSTATUS) & HA_SERROR));
699} 705}
700 706
701static int register_pio_HBA(long base, struct get_conf *gc) 707static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev)
702{ 708{
703 unsigned long size = 0; 709 unsigned long size = 0;
704 char *buff; 710 char *buff;
@@ -715,24 +721,24 @@ static int register_pio_HBA(long base, struct get_conf *gc)
715 return 0; 721 return 0;
716 } 722 }
717 723
718 if ((buff = get_pio_board_data((uint) base, gc->IRQ, gc->scsi_id[3], cplen = (htonl(gc->cplen) + 1) / 2, cppadlen = (htons(gc->cppadlen) + 1) / 2)) == NULL) { 724 if ((buff = get_pio_board_data(base, gc->IRQ, gc->scsi_id[3], cplen = (cpu_to_be32(gc->cplen) + 1) / 2, cppadlen = (cpu_to_be16(gc->cppadlen) + 1) / 2)) == NULL) {
719 printk("HBA at %#lx didn't respond to INQUIRY. Sorry.\n", (unsigned long) base); 725 printk("HBA at %#lx didn't respond to INQUIRY. Sorry.\n", base);
720 return 0; 726 return 0;
721 } 727 }
722 728
723 if (!print_selftest(base) && !ALLOW_DMA_BOARDS) { 729 if (!print_selftest(base) && !ALLOW_DMA_BOARDS) {
724 printk("HBA at %#lx failed while performing self test & setup.\n", (unsigned long) base); 730 printk("HBA at %#lx failed while performing self test & setup.\n", base);
725 return 0; 731 return 0;
726 } 732 }
727 733
728 size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)); 734 size = sizeof(hostdata) + (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz));
729 735
730 sh = scsi_register(&driver_template, size); 736 sh = scsi_register(&driver_template, size);
731 if (sh == NULL) 737 if (sh == NULL)
732 return 0; 738 return 0;
733 739
734 if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */ 740 if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
735 if (!request_irq(gc->IRQ, do_eata_pio_int_handler, SA_INTERRUPT, "EATA-PIO", sh)) { 741 if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) {
736 reg_IRQ[gc->IRQ]++; 742 reg_IRQ[gc->IRQ]++;
737 if (!gc->IRQ_TR) 743 if (!gc->IRQ_TR)
738 reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */ 744 reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */
@@ -750,8 +756,8 @@ static int register_pio_HBA(long base, struct get_conf *gc)
750 756
751 hd = SD(sh); 757 hd = SD(sh);
752 758
753 memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz))); 759 memset(hd->ccb, 0, (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz)));
754 memset(hd->reads, 0, sizeof(unsigned long) * 26); 760 memset(hd->reads, 0, sizeof(hd->reads));
755 761
756 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor)); 762 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor));
757 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name)); 763 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name));
@@ -762,7 +768,7 @@ static int register_pio_HBA(long base, struct get_conf *gc)
762 SD(sh)->revision[4] = buff[35]; 768 SD(sh)->revision[4] = buff[35];
763 SD(sh)->revision[5] = 0; 769 SD(sh)->revision[5] = 0;
764 770
765 switch (ntohl(gc->len)) { 771 switch (be32_to_cpu(gc->len)) {
766 case 0x1c: 772 case 0x1c:
767 SD(sh)->EATA_revision = 'a'; 773 SD(sh)->EATA_revision = 'a';
768 break; 774 break;
@@ -778,7 +784,7 @@ static int register_pio_HBA(long base, struct get_conf *gc)
778 SD(sh)->EATA_revision = '?'; 784 SD(sh)->EATA_revision = '?';
779 } 785 }
780 786
781 if (ntohl(gc->len) >= 0x22) { 787 if (be32_to_cpu(gc->len) >= 0x22) {
782 if (gc->is_PCI) 788 if (gc->is_PCI)
783 hd->bustype = IS_PCI; 789 hd->bustype = IS_PCI;
784 else if (gc->is_EISA) 790 else if (gc->is_EISA)
@@ -812,6 +818,8 @@ static int register_pio_HBA(long base, struct get_conf *gc)
812 818
813 hd->channel = 0; 819 hd->channel = 0;
814 820
821 hd->pdev = pci_dev_get(pdev); /* Keep a PCI reference */
822
815 sh->max_id = 8; 823 sh->max_id = 8;
816 sh->max_lun = 8; 824 sh->max_lun = 8;
817 825
@@ -842,7 +850,7 @@ static void find_pio_ISA(struct get_conf *buf)
842 continue; 850 continue;
843 if (!get_pio_conf_PIO(ISAbases[i], buf)) 851 if (!get_pio_conf_PIO(ISAbases[i], buf))
844 continue; 852 continue;
845 if (!register_pio_HBA(ISAbases[i], buf)) 853 if (!register_pio_HBA(ISAbases[i], buf, NULL))
846 release_region(ISAbases[i], 9); 854 release_region(ISAbases[i], 9);
847 else 855 else
848 ISAbases[i] = 0; 856 ISAbases[i] = 0;
@@ -874,7 +882,7 @@ static void find_pio_EISA(struct get_conf *buf)
874 if (get_pio_conf_PIO(base, buf)) { 882 if (get_pio_conf_PIO(base, buf)) {
875 DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf)); 883 DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
876 if (buf->IRQ) { 884 if (buf->IRQ) {
877 if (!register_pio_HBA(base, buf)) 885 if (!register_pio_HBA(base, buf, NULL))
878 release_region(base, 9); 886 release_region(base, 9);
879 } else { 887 } else {
880 printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n"); 888 printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n");
@@ -897,9 +905,9 @@ static void find_pio_PCI(struct get_conf *buf)
897 printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n"); 905 printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
898#else 906#else
899 struct pci_dev *dev = NULL; 907 struct pci_dev *dev = NULL;
900 u32 base, x; 908 unsigned long base, x;
901 909
902 while ((dev = pci_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) { 910 while ((dev = pci_get_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) {
903 DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev))); 911 DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev)));
904 if (pci_enable_device(dev)) 912 if (pci_enable_device(dev))
905 continue; 913 continue;
@@ -927,7 +935,7 @@ static void find_pio_PCI(struct get_conf *buf)
927 * eventually remove it from the EISA and ISA list 935 * eventually remove it from the EISA and ISA list
928 */ 936 */
929 937
930 if (!register_pio_HBA(base, buf)) { 938 if (!register_pio_HBA(base, buf, dev)) {
931 release_region(base, 9); 939 release_region(base, 9);
932 continue; 940 continue;
933 } 941 }
@@ -966,7 +974,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
966 974
967 for (i = 0; i <= MAXIRQ; i++) 975 for (i = 0; i <= MAXIRQ; i++)
968 if (reg_IRQ[i]) 976 if (reg_IRQ[i])
969 request_irq(i, do_eata_pio_int_handler, SA_INTERRUPT, "EATA-PIO", NULL); 977 request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
970 978
971 HBA_ptr = first_HBA; 979 HBA_ptr = first_HBA;
972 980
@@ -977,12 +985,12 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
977 printk("Registered HBAs:\n"); 985 printk("Registered HBAs:\n");
978 printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n"); 986 printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n");
979 for (i = 1; i <= registered_HBAs; i++) { 987 for (i = 1; i <= registered_HBAs; i++) {
980 printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4x %2d %d %d %c" 988 printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4lx %2d %d %d %c"
981 " %2d %2d %2d\n", 989 " %2d %2d %2d\n",
982 HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision, 990 HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
983 SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ? 991 SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ?
984 "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ", 992 "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ",
985 (uint) HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id, 993 HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id,
986 SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue, 994 SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue,
987 HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun); 995 HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun);
988 HBA_ptr = SD(HBA_ptr)->next; 996 HBA_ptr = SD(HBA_ptr)->next;
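register_pio_HBA() now pins the PCI device for the host's lifetime and eata_pio_release() drops the reference; both helpers tolerate NULL, which is how the ISA and EISA probe paths get away with passing no device. A sketch of the pairing, with illustrative names:

#include <linux/pci.h>

struct my_host {
	struct pci_dev *pdev;	/* NULL for ISA/EISA boards */
};

static void my_host_attach(struct my_host *h, struct pci_dev *pdev)
{
	/* pci_dev_get(NULL) is a harmless no-op, so the non-PCI probe
	 * paths can pass NULL without a special case. */
	h->pdev = pci_dev_get(pdev);
}

static void my_host_release(struct my_host *h)
{
	pci_dev_put(h->pdev);	/* also NULL-safe; drop it exactly once */
}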
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 0a3e45d7a972..5630868c1b25 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1,7 +1,6 @@
1/* $Id: esp.c,v 1.101 2002/01/15 06:48:55 davem Exp $ 1/* esp.c: ESP Sun SCSI driver.
2 * esp.c: EnhancedScsiProcessor Sun SCSI driver code.
3 * 2 *
4 * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu) 3 * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5
7/* TODO: 6/* TODO:
@@ -13,7 +12,6 @@
13 * 3) Add tagged queueing. 12 * 3) Add tagged queueing.
14 */ 13 */
15 14
16#include <linux/config.h>
17#include <linux/kernel.h> 15#include <linux/kernel.h>
18#include <linux/delay.h> 16#include <linux/delay.h>
19#include <linux/types.h> 17#include <linux/types.h>
@@ -185,11 +183,6 @@ enum {
185/*5*/ do_intr_end 183/*5*/ do_intr_end
186}; 184};
187 185
188/* The master ring of all esp hosts we are managing in this driver. */
189static struct esp *espchain;
190static DEFINE_SPINLOCK(espchain_lock);
191static int esps_running = 0;
192
193/* Forward declarations. */ 186/* Forward declarations. */
194static irqreturn_t esp_intr(int irq, void *dev_id, struct pt_regs *pregs); 187static irqreturn_t esp_intr(int irq, void *dev_id, struct pt_regs *pregs);
195 188
@@ -694,36 +687,6 @@ static void __init esp_bootup_reset(struct esp *esp)
694 sbus_readb(esp->eregs + ESP_INTRPT); 687 sbus_readb(esp->eregs + ESP_INTRPT);
695} 688}
696 689
697static void esp_chain_add(struct esp *esp)
698{
699 spin_lock_irq(&espchain_lock);
700 if (espchain) {
701 struct esp *elink = espchain;
702 while (elink->next)
703 elink = elink->next;
704 elink->next = esp;
705 } else {
706 espchain = esp;
707 }
708 esp->next = NULL;
709 spin_unlock_irq(&espchain_lock);
710}
711
712static void esp_chain_del(struct esp *esp)
713{
714 spin_lock_irq(&espchain_lock);
715 if (espchain == esp) {
716 espchain = esp->next;
717 } else {
718 struct esp *elink = espchain;
719 while (elink->next != esp)
720 elink = elink->next;
721 elink->next = esp->next;
722 }
723 esp->next = NULL;
724 spin_unlock_irq(&espchain_lock);
725}
726
727static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev) 690static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev)
728{ 691{
729 struct sbus_dev *sdev = esp->sdev; 692 struct sbus_dev *sdev = esp->sdev;
@@ -815,7 +778,7 @@ static int __init esp_register_irq(struct esp *esp)
815 * sanely maintain. 778 * sanely maintain.
816 */ 779 */
817 if (request_irq(esp->ehost->irq, esp_intr, 780 if (request_irq(esp->ehost->irq, esp_intr,
818 SA_SHIRQ, "ESP SCSI", esp)) { 781 IRQF_SHARED, "ESP SCSI", esp)) {
819 printk("esp%d: Cannot acquire irq line\n", 782 printk("esp%d: Cannot acquire irq line\n",
820 esp->esp_id); 783 esp->esp_id);
821 return -1; 784 return -1;
@@ -830,19 +793,20 @@ static int __init esp_register_irq(struct esp *esp)
830static void __init esp_get_scsi_id(struct esp *esp) 793static void __init esp_get_scsi_id(struct esp *esp)
831{ 794{
832 struct sbus_dev *sdev = esp->sdev; 795 struct sbus_dev *sdev = esp->sdev;
796 struct device_node *dp = sdev->ofdev.node;
833 797
834 esp->scsi_id = prom_getintdefault(esp->prom_node, 798 esp->scsi_id = of_getintprop_default(dp,
835 "initiator-id", 799 "initiator-id",
836 -1); 800 -1);
837 if (esp->scsi_id == -1) 801 if (esp->scsi_id == -1)
838 esp->scsi_id = prom_getintdefault(esp->prom_node, 802 esp->scsi_id = of_getintprop_default(dp,
839 "scsi-initiator-id", 803 "scsi-initiator-id",
840 -1); 804 -1);
841 if (esp->scsi_id == -1) 805 if (esp->scsi_id == -1)
842 esp->scsi_id = (sdev->bus == NULL) ? 7 : 806 esp->scsi_id = (sdev->bus == NULL) ? 7 :
843 prom_getintdefault(sdev->bus->prom_node, 807 of_getintprop_default(sdev->bus->ofdev.node,
844 "scsi-initiator-id", 808 "scsi-initiator-id",
845 7); 809 7);
846 esp->ehost->this_id = esp->scsi_id; 810 esp->ehost->this_id = esp->scsi_id;
847 esp->scsi_id_mask = (1 << esp->scsi_id); 811 esp->scsi_id_mask = (1 << esp->scsi_id);
848 812
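The hunk above swaps the old PROM calls for device-tree property reads on the struct device_node hanging off the sbus_dev. A hedged sketch of the same fallback chain, assuming the 2.6.18-era sparc prom helpers; the function name is illustrative:

#include <asm/prom.h>

static int example_initiator_id(struct device_node *dp,
				struct device_node *bus_dp)
{
	/* Try the node's own properties first, then the bus, then
	 * fall back to the conventional host ID of 7. */
	int id = of_getintprop_default(dp, "initiator-id", -1);

	if (id == -1)
		id = of_getintprop_default(dp, "scsi-initiator-id", -1);
	if (id == -1)
		id = bus_dp ? of_getintprop_default(bus_dp,
						    "scsi-initiator-id", 7)
			    : 7;
	return id;
}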
@@ -1067,28 +1031,30 @@ static void __init esp_init_swstate(struct esp *esp)
1067 esp->prev_hme_dmacsr = 0xffffffff; 1031 esp->prev_hme_dmacsr = 0xffffffff;
1068} 1032}
1069 1033
1070static int __init detect_one_esp(struct scsi_host_template *tpnt, struct sbus_dev *esp_dev, 1034static int __init detect_one_esp(struct scsi_host_template *tpnt,
1071 struct sbus_dev *espdma, struct sbus_bus *sbus, 1035 struct device *dev,
1072 int id, int hme) 1036 struct sbus_dev *esp_dev,
1037 struct sbus_dev *espdma,
1038 struct sbus_bus *sbus,
1039 int hme)
1073{ 1040{
1074 struct Scsi_Host *esp_host = scsi_register(tpnt, sizeof(struct esp)); 1041 static int instance;
1042 struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp));
1075 struct esp *esp; 1043 struct esp *esp;
1076 1044
1077 if (!esp_host) { 1045 if (!esp_host)
1078 printk("ESP: Cannot register SCSI host\n"); 1046 return -ENOMEM;
1079 return -1; 1047
1080 }
1081 if (hme) 1048 if (hme)
1082 esp_host->max_id = 16; 1049 esp_host->max_id = 16;
1083 esp = (struct esp *) esp_host->hostdata; 1050 esp = (struct esp *) esp_host->hostdata;
1084 esp->ehost = esp_host; 1051 esp->ehost = esp_host;
1085 esp->sdev = esp_dev; 1052 esp->sdev = esp_dev;
1086 esp->esp_id = id; 1053 esp->esp_id = instance;
 	esp->prom_node = esp_dev->prom_node;
 	prom_getstring(esp->prom_node, "name", esp->prom_name,
 		       sizeof(esp->prom_name));
 
-	esp_chain_add(esp);
 	if (esp_find_dvma(esp, espdma) < 0)
 		goto fail_unlink;
 	if (esp_map_regs(esp, hme) < 0) {
@@ -1115,8 +1081,19 @@ static int __init detect_one_esp(struct scsi_host_template *tpnt, struct sbus_de
 
 	esp_bootup_reset(esp);
 
+	if (scsi_add_host(esp_host, dev))
+		goto fail_free_irq;
+
+	dev_set_drvdata(&esp_dev->ofdev.dev, esp);
+
+	scsi_scan_host(esp_host);
+	instance++;
+
 	return 0;
 
+fail_free_irq:
+	free_irq(esp->ehost->irq, esp);
+
 fail_unmap_cmdarea:
 	sbus_free_consistent(esp->sdev, 16,
 			     (void *) esp->esp_command,
@@ -1129,119 +1106,99 @@ fail_dvma_release:
 	esp->dma->allocated = 0;
 
 fail_unlink:
-	esp_chain_del(esp);
-	scsi_unregister(esp_host);
+	scsi_host_put(esp_host);
 	return -1;
 }
 
 /* Detecting ESP chips on the machine. This is the simple and easy
  * version.
  */
+static int __devexit esp_remove_common(struct esp *esp)
+{
+	unsigned int irq = esp->ehost->irq;
+
+	scsi_remove_host(esp->ehost);
+
+	ESP_INTSOFF(esp->dregs);
+#if 0
+	esp_reset_dma(esp);
+	esp_reset_esp(esp);
+#endif
+
+	free_irq(irq, esp);
+	sbus_free_consistent(esp->sdev, 16,
+			     (void *) esp->esp_command, esp->esp_command_dvma);
+	sbus_iounmap(esp->eregs, ESP_REG_SIZE);
+	esp->dma->allocated = 0;
+
+	scsi_host_put(esp->ehost);
+
+	return 0;
+}
+
 
 #ifdef CONFIG_SUN4
 
 #include <asm/sun4paddr.h>
 
-static int __init esp_detect(struct scsi_host_template *tpnt)
-{
-	static struct sbus_dev esp_dev;
-	int esps_in_use = 0;
-
-	espchain = NULL;
+static struct sbus_dev sun4_esp_dev;
 
+static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
+{
 	if (sun4_esp_physaddr) {
-		memset (&esp_dev, 0, sizeof(esp_dev));
-		esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
-		esp_dev.irqs[0] = 4;
-		esp_dev.resource[0].start = sun4_esp_physaddr;
-		esp_dev.resource[0].end = sun4_esp_physaddr + ESP_REG_SIZE - 1;
-		esp_dev.resource[0].flags = IORESOURCE_IO;
-
-		if (!detect_one_esp(tpnt, &esp_dev, NULL, NULL, 0, 0))
-			esps_in_use++;
-		printk("ESP: Total of 1 ESP hosts found, %d actually in use.\n", esps_in_use);
-		esps_running = esps_in_use;
+		memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
+		sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
+		sun4_esp_dev.irqs[0] = 4;
+		sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
+		sun4_esp_dev.resource[0].end =
+			sun4_esp_physaddr + ESP_REG_SIZE - 1;
+		sun4_esp_dev.resource[0].flags = IORESOURCE_IO;
+
+		return detect_one_esp(tpnt, NULL,
+				      &sun4_esp_dev, NULL, NULL, 0);
 	}
-	return esps_in_use;
+	return 0;
 }
 
-#else /* !CONFIG_SUN4 */
-
-static int __init esp_detect(struct scsi_host_template *tpnt)
+static int __devexit esp_sun4_remove(void)
 {
-	struct sbus_bus *sbus;
-	struct sbus_dev *esp_dev, *sbdev_iter;
-	int nesps = 0, esps_in_use = 0;
+	struct of_device *dev = &sun4_esp_dev.ofdev;
+	struct esp *esp = dev_get_drvdata(&dev->dev);
 
-	espchain = 0;
-	if (!sbus_root) {
-#ifdef CONFIG_PCI
-		return 0;
-#else
-		panic("No SBUS in esp_detect()");
-#endif
-	}
-	for_each_sbus(sbus) {
-		for_each_sbusdev(sbdev_iter, sbus) {
-			struct sbus_dev *espdma = NULL;
-			int hme = 0;
-
-			/* Is it an esp sbus device? */
-			esp_dev = sbdev_iter;
-			if (strcmp(esp_dev->prom_name, "esp") &&
-			    strcmp(esp_dev->prom_name, "SUNW,esp")) {
-				if (!strcmp(esp_dev->prom_name, "SUNW,fas")) {
-					hme = 1;
-					espdma = esp_dev;
-				} else {
-					if (!esp_dev->child ||
-					    (strcmp(esp_dev->prom_name, "espdma") &&
-					     strcmp(esp_dev->prom_name, "dma")))
-						continue; /* nope... */
-					espdma = esp_dev;
-					esp_dev = esp_dev->child;
-					if (strcmp(esp_dev->prom_name, "esp") &&
-					    strcmp(esp_dev->prom_name, "SUNW,esp"))
-						continue; /* how can this happen? */
-				}
-			}
-
-			if (detect_one_esp(tpnt, esp_dev, espdma, sbus, nesps++, hme) < 0)
-				continue;
-
-			esps_in_use++;
-		} /* for each sbusdev */
-	} /* for each sbus */
-	printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,
-	       esps_in_use);
-	esps_running = esps_in_use;
-	return esps_in_use;
+	return esp_remove_common(esp);
 }
 
-#endif /* !CONFIG_SUN4 */
+#else /* !CONFIG_SUN4 */
 
-/*
- */
-static int esp_release(struct Scsi_Host *host)
+static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
 {
-	struct esp *esp = (struct esp *) host->hostdata;
+	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
+	struct device_node *dp = dev->node;
+	struct sbus_dev *dma_sdev = NULL;
+	int hme = 0;
+
+	if (dp->parent &&
+	    (!strcmp(dp->parent->name, "espdma") ||
+	     !strcmp(dp->parent->name, "dma")))
+		dma_sdev = sdev->parent;
+	else if (!strcmp(dp->name, "SUNW,fas")) {
+		dma_sdev = sdev;
+		hme = 1;
+	}
 
-	ESP_INTSOFF(esp->dregs);
-#if 0
-	esp_reset_dma(esp);
-	esp_reset_esp(esp);
-#endif
+	return detect_one_esp(match->data, &dev->dev,
+			      sdev, dma_sdev, sdev->bus, hme);
+}
 
-	free_irq(esp->ehost->irq, esp);
-	sbus_free_consistent(esp->sdev, 16,
-			     (void *) esp->esp_command, esp->esp_command_dvma);
-	sbus_iounmap(esp->eregs, ESP_REG_SIZE);
-	esp->dma->allocated = 0;
-	esp_chain_del(esp);
+static int __devexit esp_sbus_remove(struct of_device *dev)
+{
+	struct esp *esp = dev_get_drvdata(&dev->dev);
 
-	return 0;
+	return esp_remove_common(esp);
 }
 
+#endif /* !CONFIG_SUN4 */
+
 /* The info function will return whatever useful
  * information the developer sees fit. If not provided, then
  * the name field will be used instead.
@@ -1415,18 +1372,11 @@ static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len)
 static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
 			 int length, int inout)
 {
-	struct esp *esp;
+	struct esp *esp = (struct esp *) host->hostdata;
 
 	if (inout)
 		return -EINVAL; /* not yet */
 
-	for_each_esp(esp) {
-		if (esp->ehost == host)
-			break;
-	}
-	if (!esp)
-		return -EINVAL;
-
 	if (start)
 		*start = buffer;
 
@@ -1448,7 +1398,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
 			sp->SCp.ptr = NULL;
 		}
 	} else {
-		sp->SCp.buffer = (struct scatterlist *) sp->buffer;
+		sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
 		sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
 						       sp->SCp.buffer,
 						       sp->use_sg,
@@ -1461,7 +1411,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
 static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
 {
 	if (sp->use_sg) {
-		sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg,
+		sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
 			      sp->sc_data_direction);
 	} else if (sp->request_bufflen) {
 		sbus_unmap_single(esp->sdev,
@@ -2805,18 +2755,15 @@ static int esp_do_data_finale(struct esp *esp)
  */
 static int esp_should_clear_sync(struct scsi_cmnd *sp)
 {
-	u8 cmd1 = sp->cmnd[0];
-	u8 cmd2 = sp->data_cmnd[0];
+	u8 cmd = sp->cmnd[0];
 
 	/* These cases are for spinning up a disk and
 	 * waiting for that spinup to complete.
 	 */
-	if (cmd1 == START_STOP ||
-	    cmd2 == START_STOP)
+	if (cmd == START_STOP)
 		return 0;
 
-	if (cmd1 == TEST_UNIT_READY ||
-	    cmd2 == TEST_UNIT_READY)
+	if (cmd == TEST_UNIT_READY)
 		return 0;
 
 	/* One more special case for SCSI tape drives,
@@ -2824,8 +2771,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
 	 * completion of a rewind or tape load operation.
 	 */
 	if (sp->device->type == TYPE_TAPE) {
-		if (cmd1 == MODE_SENSE ||
-		    cmd2 == MODE_SENSE)
+		if (cmd == MODE_SENSE)
 			return 0;
 	}
 
@@ -4377,15 +4323,12 @@ static void esp_slave_destroy(struct scsi_device *SDptr)
 	SDptr->hostdata = NULL;
 }
 
-static struct scsi_host_template driver_template = {
-	.proc_name = "esp",
-	.proc_info = esp_proc_info,
-	.name = "Sun ESP 100/100a/200",
-	.detect = esp_detect,
+static struct scsi_host_template esp_template = {
+	.module = THIS_MODULE,
+	.name = "esp",
+	.info = esp_info,
 	.slave_alloc = esp_slave_alloc,
 	.slave_destroy = esp_slave_destroy,
-	.release = esp_release,
-	.info = esp_info,
 	.queuecommand = esp_queue,
 	.eh_abort_handler = esp_abort,
 	.eh_bus_reset_handler = esp_reset,
@@ -4394,12 +4337,58 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.proc_name = "esp",
+	.proc_info = esp_proc_info,
+};
+
+#ifndef CONFIG_SUN4
+static struct of_device_id esp_match[] = {
+	{
+		.name = "SUNW,esp",
+		.data = &esp_template,
+	},
+	{
+		.name = "SUNW,fas",
+		.data = &esp_template,
+	},
+	{
+		.name = "esp",
+		.data = &esp_template,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, esp_match);
+
+static struct of_platform_driver esp_sbus_driver = {
+	.name = "esp",
+	.match_table = esp_match,
+	.probe = esp_sbus_probe,
+	.remove = __devexit_p(esp_sbus_remove),
 };
+#endif
+
+static int __init esp_init(void)
+{
+#ifdef CONFIG_SUN4
+	return esp_sun4_probe(&esp_template);
+#else
+	return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
+#endif
+}
 
-#include "scsi_module.c"
+static void __exit esp_exit(void)
+{
+#ifdef CONFIG_SUN4
+	esp_sun4_remove();
+#else
+	of_unregister_driver(&esp_sbus_driver);
+#endif
+}
 
-MODULE_DESCRIPTION("EnhancedScsiProcessor Sun SCSI driver");
-MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
+MODULE_DESCRIPTION("ESP Sun SCSI driver");
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+module_init(esp_init);
+module_exit(esp_exit);
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h
index 73f7d6968ab6..a98cda9121fc 100644
--- a/drivers/scsi/esp.h
+++ b/drivers/scsi/esp.h
@@ -403,8 +403,4 @@ struct esp {
 #define ESP_MHZ_TO_CYCLE(mhertz)  ((1000000000) / ((mhertz) / 1000))
 #define ESP_TICK(ccf, cycle)  ((7682 * (ccf) * (cycle) / 1000))
 
-/* For our interrupt engine. */
-#define for_each_esp(esp) \
-	for((esp) = espchain; (esp); (esp) = (esp)->next)
-
 #endif /* !(_SPARC_ESP_H) */
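
The esp conversion above replaces the driver's private espchain list and the old template-driven detect/release entry points with per-device probe and remove methods registered on the SBUS/OF bus; the host pointer now travels through driver data rather than a global chain. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver specifics (APIs as in this 2.6.18-era tree):

	static int __devinit foo_probe(struct of_device *op,
				       const struct of_device_id *match)
	{
		struct scsi_host_template *tpnt = match->data;
		struct Scsi_Host *host;

		host = scsi_host_alloc(tpnt, sizeof(struct foo_softc));
		if (!host)
			return -ENOMEM;

		/* ... map registers, request the IRQ, reset the chip ... */

		if (scsi_add_host(host, &op->dev))
			goto fail;
		dev_set_drvdata(&op->dev, host);
		scsi_scan_host(host);
		return 0;

	fail:
		scsi_host_put(host);
		return -ENODEV;
	}

	static int __devexit foo_remove(struct of_device *op)
	{
		struct Scsi_Host *host = dev_get_drvdata(&op->dev);

		scsi_remove_host(host);
		/* ... free the IRQ, unmap registers ... */
		scsi_host_put(host);
		return 0;
	}
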
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
index 8ae9c406a83b..2a1c5c22b9e0 100644
--- a/drivers/scsi/fastlane.c
+++ b/drivers/scsi/fastlane.c
@@ -210,7 +210,7 @@ int __init fastlane_esp_detect(struct scsi_host_template *tpnt)
 
 	esp->irq = IRQ_AMIGA_PORTS;
 	esp->slot = board+FASTLANE_ESP_ADDR;
-	if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
+	if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
 			"Fastlane SCSI", esp->ehost)) {
 		printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
 		goto err_unmap;
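
This hunk, like the SA_SHIRQ/SA_INTERRUPT hunks in the drivers that follow, is part of the tree-wide rename of the SA_* interrupt flags to IRQF_* (SA_SHIRQ becomes IRQF_SHARED, SA_INTERRUPT becomes IRQF_DISABLED); the flag values and semantics are unchanged. A sketch of the before/after spelling, using a hypothetical foo driver and the three-argument handler prototype these pre-2.6.19 drivers use:

	static irqreturn_t foo_intr(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* ... service the board ... */
		return IRQ_HANDLED;
	}

	static int foo_attach_irq(int irq, void *dev_id)
	{
		/* old spelling: request_irq(irq, foo_intr, SA_SHIRQ, ...) */
		return request_irq(irq, foo_intr, IRQF_SHARED, "foo", dev_id);
	}
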
diff --git a/drivers/scsi/fcal.c b/drivers/scsi/fcal.c
index 03416548f20c..c4e16c0775de 100644
--- a/drivers/scsi/fcal.c
+++ b/drivers/scsi/fcal.c
@@ -13,7 +13,6 @@
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
 #include <linux/init.h>
-#include <linux/config.h>
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
@@ -249,8 +248,7 @@ int fcal_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t of
 		if (scd->id == target) {
 			SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ",
 				alpa, target, u1[0], u1[1], u2[0], u2[1]);
-			SPRINTF ("%s ", (scd->type < MAX_SCSI_DEVICE_CODE) ?
-				scsi_device_types[(short) scd->type] : "Unknown device");
+			SPRINTF ("%s ", scsi_device_type(scd->type));
 
 			for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++)
 				SPRINTF ("%c", scd->vendor[j]);
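
Here and in g_NCR5380.c below, the open-coded lookup into the exported scsi_device_types[] array, with its per-driver bounds check and "Unknown device" fallback, is replaced by the midlayer helper scsi_device_type(). A sketch of the substitution (the old expression is shown in the comment; foo_print_type is hypothetical):

	/*
	 * Old, per-driver pattern:
	 *	(type < MAX_SCSI_DEVICE_CODE)
	 *		? scsi_device_types[type] : "Unknown device"
	 * New: one helper owns the table, the range check and the fallback.
	 */
	static void foo_print_type(struct scsi_device *sdev)
	{
		printk(KERN_INFO "foo: %s\n", scsi_device_type(sdev->type));
	}
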
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 70a1606bd580..dde3edf35c03 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -400,7 +400,7 @@ static int fd_mcs_detect(struct scsi_host_template * tpnt)
 		mca_set_adapter_name(slot - 1, fd_mcs_adapters[loop].name);
 
 		/* check irq/region */
-		if (request_irq(irq, fd_mcs_intr, SA_SHIRQ, "fd_mcs", hosts)) {
+		if (request_irq(irq, fd_mcs_intr, IRQF_SHARED, "fd_mcs", hosts)) {
 			printk(KERN_ERR "fd_mcs: interrupt is not available, skipping...\n");
 			continue;
 		}
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index e16013f0ad6e..b0694dcce246 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -266,7 +266,6 @@
 
 **************************************************************************/
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -950,7 +949,7 @@ struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt )
    /* Register the IRQ with the kernel */
 
    retcode = request_irq( interrupt_level,
-			  do_fdomain_16x0_intr, pdev?SA_SHIRQ:0, "fdomain", shpnt);
+			  do_fdomain_16x0_intr, pdev?IRQF_SHARED:0, "fdomain", shpnt);
 
    if (retcode < 0) {
       if (retcode == -EINVAL) {
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5f313c93b7a9..cdd893bb4e28 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -91,7 +91,6 @@
 #define AUTOPROBE_IRQ
 #define AUTOSENSE
 
-#include <linux/config.h>
 
 #ifdef CONFIG_SCSI_GENERIC_NCR53C400
 #define NCR53C400_PSEUDO_DMA 1
@@ -462,7 +461,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
 		instance->irq = NCR5380_probe_irq(instance, 0xffff);
 
 	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380", instance)) {
+		if (request_irq(instance->irq, generic_NCR5380_intr, IRQF_DISABLED, "NCR5380", instance)) {
 			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
 		}
@@ -812,7 +811,6 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
 	struct NCR5380_hostdata *hostdata;
 #ifdef NCR5380_STATS
 	struct scsi_device *dev;
-	extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
 #endif
 
 	NCR5380_setup(scsi_ptr);
@@ -852,7 +850,7 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
 			long tr = hostdata->time_read[dev->id] / HZ;
 			long tw = hostdata->time_write[dev->id] / HZ;
 
-			PRINTP(" T:%d %s " ANDP dev->id ANDP(dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int) dev->type] : "Unknown");
+			PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
 			for (i = 0; i < 8; i++)
 				if (dev->vendor[i] >= 0x20)
 					*(buffer + (len++)) = dev->vendor[i];
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index d60a89cb8052..df0b3f69ef63 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -32,7 +32,6 @@
 #ifndef GENERIC_NCR5380_H
 #define GENERIC_NCR5380_H
 
-#include <linux/config.h>
 
 #define GENERIC_NCR5380_PUBLIC_RELEASE 1
 
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 76071a158306..43afd476e606 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4350,7 +4350,7 @@ static int __init gdth_detect(Scsi_Host_Template *shtp)
 		printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
 		       isa_bios,ha->irq,ha->drq);
 
-		if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth",ha)) {
+		if (request_irq(ha->irq,gdth_interrupt,IRQF_DISABLED,"gdth",ha)) {
 			printk("GDT-ISA: Unable to allocate IRQ\n");
 			scsi_unregister(shp);
 			continue;
@@ -4476,7 +4476,7 @@ static int __init gdth_detect(Scsi_Host_Template *shtp)
 		printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
 		       eisa_slot>>12,ha->irq);
 
-		if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth",ha)) {
+		if (request_irq(ha->irq,gdth_interrupt,IRQF_DISABLED,"gdth",ha)) {
 			printk("GDT-EISA: Unable to allocate IRQ\n");
 			scsi_unregister(shp);
 			continue;
@@ -4603,7 +4603,7 @@ static int __init gdth_detect(Scsi_Host_Template *shtp)
 		       pcistr[ctr].bus,PCI_SLOT(pcistr[ctr].device_fn),ha->irq);
 
 		if (request_irq(ha->irq, gdth_interrupt,
-				SA_INTERRUPT|SA_SHIRQ, "gdth", ha))
+				IRQF_DISABLED|IRQF_SHARED, "gdth", ha))
 		{
 			printk("GDT-PCI: Unable to allocate IRQ\n");
 			scsi_unregister(shp);
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 5b154498056d..18dbe5c27dac 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -47,7 +47,7 @@ void gvp11_setup (char *str, int *ints)
 	gvp11_xfer_mask = ints[1];
 }
 
-static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
 	unsigned short cntr = GVP11_DMAC_INT_ENABLE;
 	unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -142,8 +142,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
 	return 0;
 }
 
-static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt,
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 		      int status)
 {
 	/* stop DMA */
 	DMA(instance)->SP_DMA = 1;
@@ -328,7 +328,7 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
 				  (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
 							  : WD33C93_FS_12_15);
 
-	request_irq(IRQ_AMIGA_PORTS, gvp11_intr, SA_SHIRQ, "GVP11 SCSI",
+	request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
 		    instance);
 	DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
 	num_gvp11++;
@@ -341,7 +341,7 @@ release:
 	return num_gvp11;
 }
 
-static int gvp11_bus_reset(Scsi_Cmnd *cmd)
+static int gvp11_bus_reset(struct scsi_cmnd *cmd)
 {
 	/* FIXME perform bus-specific reset */
 
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index 575d219d14ba..bf22859a5035 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -13,10 +13,6 @@
 
 int gvp11_detect(struct scsi_host_template *);
 int gvp11_release(struct Scsi_Host *);
-const char *wd33c93_info(void);
-int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int wd33c93_abort(Scsi_Cmnd *);
-int wd33c93_reset(Scsi_Cmnd *, unsigned int);
 
 #ifndef CMD_PER_LUN
 #define CMD_PER_LUN 2
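
The gvp11 hunks are part of the tree-wide retirement of the Scsi_Cmnd typedef in favour of the bare struct tag, which also makes the typedef-based wd33c93_* prototypes in gvp11.h redundant (the shared wd33c93 header carries its own). The mechanical shape of the change:

	/* old spelling, via the typedef being removed:
	 *	static int dma_setup(Scsi_Cmnd *cmd, int dir_in);
	 * new spelling, struct tag used directly:
	 */
	static int dma_setup(struct scsi_cmnd *cmd, int dir_in);
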
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index dfcb96f3e60c..68ef1636678d 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -265,6 +265,9 @@ static void scsi_host_dev_release(struct device *dev)
 		destroy_workqueue(shost->work_q);
 
 	scsi_destroy_command_freelist(shost);
+	if (shost->bqt)
+		blk_free_tags(shost->bqt);
+
 	kfree(shost->shost_data);
 
 	if (parent)
@@ -487,7 +490,9 @@ EXPORT_SYMBOL(scsi_is_host_device);
  * @work:	Work to queue for execution.
  *
  * Return value:
- * 	0 on success / != 0 for error
+ * 	1 - work queued for execution
+ * 	0 - work is already queued
+ * 	-EINVAL - work queue doesn't exist
  **/
 int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
 {
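
Two separate fixes in hosts.c: the release path now frees a host-owned block-layer tag table (blk_free_tags()) so shared tag maps are not leaked, and the scsi_queue_work() kerneldoc is corrected to describe what this queue_work() wrapper actually returns — a tri-state, not 0-on-success. A sketch of a caller written against the corrected contract (foo_* names are hypothetical):

	static void foo_kick_worker(struct Scsi_Host *shost,
				    struct work_struct *work)
	{
		int ret = scsi_queue_work(shost, work);

		/* 1: newly queued; 0: already pending -- both are fine */
		if (ret < 0)
			printk(KERN_ERR "foo: host %d has no work queue\n",
			       shost->host_no);
	}
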
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index f77808329e7c..28bfb8f9f81d 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -15,7 +15,6 @@
  *
  * For more information, visit http://www.highpoint-tech.com
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -45,10 +44,6 @@ static char driver_name[] = "hptiop";
 static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
 static const char driver_ver[] = "v1.0 (060426)";
 
-static DEFINE_SPINLOCK(hptiop_hba_list_lock);
-static LIST_HEAD(hptiop_hba_list);
-static int hptiop_cdev_major = -1;
-
 static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -577,7 +572,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 	if (atomic_xchg(&hba->resetting, 1) == 0) {
 		atomic_inc(&hba->reset_count);
 		writel(IOPMU_INBOUND_MSG0_RESET,
-			&hba->iop->outbound_msgaddr0);
+			&hba->iop->inbound_msgaddr0);
 		hptiop_pci_posting_flush(hba->iop);
 	}
 
@@ -620,532 +615,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
 	return queue_depth;
 }
 
-struct hptiop_getinfo {
-	char __user *buffer;
-	loff_t buflength;
-	loff_t bufoffset;
-	loff_t buffillen;
-	loff_t filpos;
-};
-
-static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
-					char *data, int datalen)
-{
-	if (pinfo->filpos < pinfo->bufoffset) {
-		if (pinfo->filpos + datalen <= pinfo->bufoffset) {
-			pinfo->filpos += datalen;
-			return;
-		} else {
-			data += (pinfo->bufoffset - pinfo->filpos);
-			datalen -= (pinfo->bufoffset - pinfo->filpos);
-			pinfo->filpos = pinfo->bufoffset;
-		}
-	}
-
-	pinfo->filpos += datalen;
-	if (pinfo->buffillen == pinfo->buflength)
-		return;
-
-	if (pinfo->buflength - pinfo->buffillen < datalen)
-		datalen = pinfo->buflength - pinfo->buffillen;
-
-	if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
-		return;
-
-	pinfo->buffillen += datalen;
-}
-
-static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
-{
-	va_list args;
-	char buf[128];
-	int len;
-
-	va_start(args, fmt);
-	len = vsnprintf(buf, sizeof(buf), fmt, args);
-	va_end(args);
-	hptiop_copy_mem_info(pinfo, buf, len);
-	return len;
-}
-
-static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
-{
-	arg->done = NULL;
-	wake_up(&arg->hba->ioctl_wq);
-}
-
-static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
-{
-	struct hptiop_hba *hba = arg->hba;
-	u32 val;
-	struct hpt_iop_request_ioctl_command __iomem *req;
-	int ioctl_retry = 0;
-
-	dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
-
-	/*
-	 * check (in + out) buff size from application.
-	 * outbuf must be dword aligned.
-	 */
-	if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
-			hba->max_request_size
-				- sizeof(struct hpt_iop_request_header)
-				- 4 * sizeof(u32)) {
-		dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
-				hba->host->host_no,
-				arg->inbuf_size, arg->outbuf_size);
-		arg->result = HPT_IOCTL_RESULT_FAILED;
-		return;
-	}
-
-retry:
-	spin_lock_irq(hba->host->host_lock);
-
-	val = readl(&hba->iop->inbound_queue);
-	if (val == IOPMU_QUEUE_EMPTY) {
-		spin_unlock_irq(hba->host->host_lock);
-		dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
-		arg->result = -1;
-		return;
-	}
-
-	req = (struct hpt_iop_request_ioctl_command __iomem *)
-			((unsigned long)hba->iop + val);
-
-	writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
-			&req->ioctl_code);
-	writel(arg->inbuf_size, &req->inbuf_size);
-	writel(arg->outbuf_size, &req->outbuf_size);
-
-	/*
-	 * use the buffer on the IOP local memory first, then copy it
-	 * back to host.
-	 * the caller's request buffer shoudl be little-endian.
-	 */
-	if (arg->inbuf_size)
-		memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
-
-	/* correct the controller ID for IOP */
-	if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
-		arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
-		arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
-		&& arg->inbuf_size >= sizeof(u32))
-		writel(0, req->buf);
-
-	writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
-	writel(0, &req->header.flags);
-	writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
-			+ arg->inbuf_size, &req->header.size);
-	writel((u32)(unsigned long)arg, &req->header.context);
-	writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
-			&req->header.context_hi32);
-	writel(IOP_RESULT_PENDING, &req->header.result);
-
-	arg->result = HPT_IOCTL_RESULT_FAILED;
-	arg->done = hptiop_ioctl_done;
-
-	writel(val, &hba->iop->inbound_queue);
-	hptiop_pci_posting_flush(hba->iop);
-
-	spin_unlock_irq(hba->host->host_lock);
-
-	wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
-
-	if (arg->done != NULL) {
-		hptiop_reset_hba(hba);
-		if (ioctl_retry++ < 3)
-			goto retry;
-	}
-
-	dprintk("hpt_iop_ioctl %x result %d\n",
-			arg->ioctl_code, arg->result);
-}
-
-static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
-			u32 insize, void *outbuf, u32 outsize)
-{
-	struct hpt_ioctl_k arg;
-	arg.hba = hba;
-	arg.ioctl_code = code;
-	arg.inbuf = inbuf;
-	arg.outbuf = outbuf;
-	arg.inbuf_size = insize;
-	arg.outbuf_size = outsize;
-	arg.bytes_returned = NULL;
-	hptiop_do_ioctl(&arg);
-	return arg.result;
-}
-
-static inline int hpt_id_valid(__le32 id)
-{
-	return id != 0 && id != cpu_to_le32(0xffffffff);
-}
-
-static int hptiop_get_controller_info(struct hptiop_hba *hba,
-					struct hpt_controller_info *pinfo)
-{
-	int id = 0;
-
-	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
-		&id, sizeof(int), pinfo, sizeof(*pinfo));
-}
-
-
-static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
-					struct hpt_channel_info *pinfo)
-{
-	u32 ids[2];
-
-	ids[0] = 0;
-	ids[1] = bus;
-	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
-			ids, sizeof(ids), pinfo, sizeof(*pinfo));
-
-}
-
-static int hptiop_get_logical_devices(struct hptiop_hba *hba,
-					__le32 *pids, int maxcount)
-{
-	int i;
-	u32 count = maxcount - 1;
-
-	if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
-			&count, sizeof(u32),
-			pids, sizeof(u32) * maxcount))
-		return -1;
-
-	maxcount = le32_to_cpu(pids[0]);
-	for (i = 0; i < maxcount; i++)
-		pids[i] = pids[i+1];
-
-	return maxcount;
-}
-
-static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
-				struct hpt_logical_device_info_v3 *pinfo)
-{
-	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
-				&id, sizeof(u32),
-				pinfo, sizeof(*pinfo));
-}
-
-static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
-{
-	static char s[64];
-	u32 flags = le32_to_cpu(devinfo->u.array.flags);
-	u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
-	u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
-
-	if (flags & ARRAY_FLAG_DISABLED)
-		return "Disabled";
-	else if (flags & ARRAY_FLAG_TRANSFORMING)
-		sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
-			trans_prog / 100,
-			trans_prog % 100,
-			(flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
-					", Critical" : "",
-			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
-			 !(flags & ARRAY_FLAG_REBUILDING) &&
-			 !(flags & ARRAY_FLAG_INITIALIZING))?
-					", Unintialized" : "");
-	else if ((flags & ARRAY_FLAG_BROKEN) &&
-				devinfo->u.array.array_type != AT_RAID6)
-		return "Critical";
-	else if (flags & ARRAY_FLAG_REBUILDING)
-		sprintf(s,
-			(flags & ARRAY_FLAG_NEEDINITIALIZING)?
-				"%sBackground initializing %d.%d%%" :
-				"%sRebuilding %d.%d%%",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			reb_prog / 100,
-			reb_prog % 100);
-	else if (flags & ARRAY_FLAG_VERIFYING)
-		sprintf(s, "%sVerifying %d.%d%%",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			reb_prog / 100,
-			reb_prog % 100);
-	else if (flags & ARRAY_FLAG_INITIALIZING)
-		sprintf(s, "%sForground initializing %d.%d%%",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			reb_prog / 100,
-			reb_prog % 100);
-	else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
-		sprintf(s,"%s%s%s", "Need Expanding/Migrating",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
-			 !(flags & ARRAY_FLAG_REBUILDING) &&
-			 !(flags & ARRAY_FLAG_INITIALIZING))?
-				", Unintialized" : "");
-	else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
-		!(flags & ARRAY_FLAG_REBUILDING) &&
-		!(flags & ARRAY_FLAG_INITIALIZING))
-		sprintf(s,"%sUninitialized",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
-	else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
-			(flags & ARRAY_FLAG_BROKEN))
-		return "Critical";
-	else
-		return "Normal";
-	return s;
-}
-
-static void hptiop_dump_devinfo(struct hptiop_hba *hba,
-			struct hptiop_getinfo *pinfo, __le32 id, int indent)
-{
-	struct hpt_logical_device_info_v3 devinfo;
-	int i;
-	u64 capacity;
-
-	for (i = 0; i < indent; i++)
-		hptiop_copy_info(pinfo, "\t");
-
-	if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
-		hptiop_copy_info(pinfo, "unknown\n");
-		return;
-	}
-
-	switch (devinfo.type) {
-
-	case LDT_DEVICE: {
-		struct hd_driveid *driveid;
-		u32 flags = le32_to_cpu(devinfo.u.device.flags);
-
-		driveid = (struct hd_driveid *)devinfo.u.device.ident;
-		/* model[] is 40 chars long, but we just want 20 chars here */
-		driveid->model[20] = 0;
-
-		if (indent)
-			if (flags & DEVICE_FLAG_DISABLED)
-				hptiop_copy_info(pinfo,"Missing\n");
-			else
-				hptiop_copy_info(pinfo, "CH%d %s\n",
-					devinfo.u.device.path_id + 1,
-					driveid->model);
-		else {
-			capacity = le64_to_cpu(devinfo.capacity) * 512;
-			do_div(capacity, 1000000);
-			hptiop_copy_info(pinfo,
-					"CH%d %s, %lluMB, %s %s%s%s%s\n",
-					devinfo.u.device.path_id + 1,
-					driveid->model,
-					capacity,
-					(flags & DEVICE_FLAG_DISABLED)?
-						"Disabled" : "Normal",
-					devinfo.u.device.read_ahead_enabled?
-						"[RA]" : "",
-					devinfo.u.device.write_cache_enabled?
-						"[WC]" : "",
-					devinfo.u.device.TCQ_enabled?
-						"[TCQ]" : "",
-					devinfo.u.device.NCQ_enabled?
-						"[NCQ]" : ""
-				);
-		}
-		break;
-	}
-
-	case LDT_ARRAY:
-		if (devinfo.target_id != INVALID_TARGET_ID)
-			hptiop_copy_info(pinfo, "[DISK %d_%d] ",
-					devinfo.vbus_id, devinfo.target_id);
-
-		capacity = le64_to_cpu(devinfo.capacity) * 512;
-		do_div(capacity, 1000000);
-		hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
-				devinfo.u.array.name,
-				devinfo.u.array.array_type==AT_RAID0? "RAID0" :
-					devinfo.u.array.array_type==AT_RAID1? "RAID1" :
-					devinfo.u.array.array_type==AT_RAID5? "RAID5" :
-					devinfo.u.array.array_type==AT_RAID6? "RAID6" :
-					devinfo.u.array.array_type==AT_JBOD? "JBOD" :
-							"unknown",
-				capacity,
-				get_array_status(&devinfo));
-		for (i = 0; i < devinfo.u.array.ndisk; i++) {
-			if (hpt_id_valid(devinfo.u.array.members[i])) {
-				if (cpu_to_le16(1<<i) &
-					devinfo.u.array.critical_members)
-					hptiop_copy_info(pinfo, "\t*");
-				hptiop_dump_devinfo(hba, pinfo,
-					devinfo.u.array.members[i], indent+1);
-			}
-			else
-				hptiop_copy_info(pinfo, "\tMissing\n");
-		}
-		if (id == devinfo.u.array.transform_source) {
-			hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
-			hptiop_dump_devinfo(hba, pinfo,
-				devinfo.u.array.transform_target, indent+1);
-		}
-		break;
-	}
-}
-
 static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
 }
 
-static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	struct hptiop_hba *hba = filp->private_data;
-	struct hptiop_getinfo info;
-	int i, j, ndev;
-	struct hpt_controller_info con_info;
-	struct hpt_channel_info chan_info;
-	__le32 ids[32];
-
-	info.buffer = buf;
-	info.buflength = count;
-	info.bufoffset = ppos ? *ppos : 0;
-	info.filpos = 0;
-	info.buffillen = 0;
-
-	if (hptiop_get_controller_info(hba, &con_info))
-		return -EIO;
-
-	for (i = 0; i < con_info.num_buses; i++) {
-		if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
-			if (hpt_id_valid(chan_info.devices[0]))
-				hptiop_dump_devinfo(hba, &info,
-						chan_info.devices[0], 0);
-			if (hpt_id_valid(chan_info.devices[1]))
-				hptiop_dump_devinfo(hba, &info,
-						chan_info.devices[1], 0);
-		}
-	}
-
-	ndev = hptiop_get_logical_devices(hba, ids,
-					sizeof(ids) / sizeof(ids[0]));
-
-	/*
-	 * if hptiop_get_logical_devices fails, ndev==-1 and it just
-	 * output nothing here
-	 */
-	for (j = 0; j < ndev; j++)
-		hptiop_dump_devinfo(hba, &info, ids[j], 0);
-
-	if (ppos)
-		*ppos += info.buffillen;
-
-	return info.buffillen;
-}
-
-static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
-				unsigned int cmd, unsigned long arg)
-{
-	struct hptiop_hba *hba = file->private_data;
-	struct hpt_ioctl_u ioctl_u;
-	struct hpt_ioctl_k ioctl_k;
-	u32 bytes_returned;
-	int err = -EINVAL;
-
-	if (copy_from_user(&ioctl_u,
-		(void __user *)arg, sizeof(struct hpt_ioctl_u)))
-		return -EINVAL;
-
-	if (ioctl_u.magic != HPT_IOCTL_MAGIC)
-		return -EINVAL;
-
-	ioctl_k.ioctl_code = ioctl_u.ioctl_code;
-	ioctl_k.inbuf = NULL;
-	ioctl_k.inbuf_size = ioctl_u.inbuf_size;
-	ioctl_k.outbuf = NULL;
-	ioctl_k.outbuf_size = ioctl_u.outbuf_size;
-	ioctl_k.hba = hba;
-	ioctl_k.bytes_returned = &bytes_returned;
-
-	/* verify user buffer */
-	if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
-			ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
-		(ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
-			ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
-		(ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
-			ioctl_u.bytes_returned, sizeof(u32))) ||
-		ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
-
-		dprintk("scsi%d: got bad user address\n", hba->host->host_no);
-		return -EINVAL;
-	}
-
-	/* map buffer to kernel. */
-	if (ioctl_k.inbuf_size) {
-		ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
-		if (!ioctl_k.inbuf) {
-			dprintk("scsi%d: fail to alloc inbuf\n",
-					hba->host->host_no);
-			err = -ENOMEM;
-			goto err_exit;
-		}
-
-		if (copy_from_user(ioctl_k.inbuf,
-				ioctl_u.inbuf, ioctl_k.inbuf_size)) {
-			goto err_exit;
-		}
-	}
-
-	if (ioctl_k.outbuf_size) {
-		ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
-		if (!ioctl_k.outbuf) {
-			dprintk("scsi%d: fail to alloc outbuf\n",
-					hba->host->host_no);
-			err = -ENOMEM;
-			goto err_exit;
-		}
-	}
-
-	hptiop_do_ioctl(&ioctl_k);
-
-	if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
-		if (ioctl_k.outbuf_size &&
-			copy_to_user(ioctl_u.outbuf,
-				ioctl_k.outbuf, ioctl_k.outbuf_size))
-			goto err_exit;
-
-		if (ioctl_u.bytes_returned &&
-			copy_to_user(ioctl_u.bytes_returned,
-				&bytes_returned, sizeof(u32)))
-			goto err_exit;
-
-		err = 0;
-	}
-
-err_exit:
-	kfree(ioctl_k.inbuf);
-	kfree(ioctl_k.outbuf);
-
-	return err;
-}
-
-static int hptiop_cdev_open(struct inode *inode, struct file *file)
-{
-	struct hptiop_hba *hba;
-	unsigned i = 0, minor = iminor(inode);
-	int ret = -ENODEV;
-
-	spin_lock(&hptiop_hba_list_lock);
-	list_for_each_entry(hba, &hptiop_hba_list, link) {
-		if (i == minor) {
-			file->private_data = hba;
-			ret = 0;
-			goto out;
-		}
-		i++;
-	}
-
-out:
-	spin_unlock(&hptiop_hba_list_lock);
-	return ret;
-}
-
-static struct file_operations hptiop_cdev_fops = {
-	.owner = THIS_MODULE,
-	.read = hptiop_cdev_read,
-	.ioctl = hptiop_cdev_ioctl,
-	.open = hptiop_cdev_open,
-};
-
 static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
 {
 	struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1296,19 +770,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 		goto unmap_pci_bar;
 	}
 
-	if (scsi_add_host(host, &pcidev->dev)) {
-		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
-			hba->host->host_no);
-		goto unmap_pci_bar;
-	}
-
 	pci_set_drvdata(pcidev, host);
 
-	if (request_irq(pcidev->irq, hptiop_intr, SA_SHIRQ,
+	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
 				driver_name, hba)) {
 		printk(KERN_ERR "scsi%d: request irq %d failed\n",
 			hba->host->host_no, pcidev->irq);
-		goto remove_scsi_host;
+		goto unmap_pci_bar;
 	}
 
 	/* Allocate request mem */
@@ -1355,9 +823,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	if (hptiop_initialize_iop(hba))
 		goto free_request_mem;
 
-	spin_lock(&hptiop_hba_list_lock);
-	list_add_tail(&hba->link, &hptiop_hba_list);
-	spin_unlock(&hptiop_hba_list_lock);
+	if (scsi_add_host(host, &pcidev->dev)) {
+		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
+			hba->host->host_no);
+		goto free_request_mem;
+	}
+
 
 	scsi_scan_host(host);
 
@@ -1372,9 +843,6 @@ free_request_mem:
 free_request_irq:
 	free_irq(hba->pcidev->irq, hba);
 
-remove_scsi_host:
-	scsi_remove_host(host);
-
 unmap_pci_bar:
 	iounmap(hba->iop);
 
@@ -1422,10 +890,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
 
 	scsi_remove_host(host);
 
-	spin_lock(&hptiop_hba_list_lock);
-	list_del_init(&hba->link);
-	spin_unlock(&hptiop_hba_list_lock);
-
 	hptiop_shutdown(pcidev);
 
 	free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +926,12 @@ static struct pci_driver hptiop_pci_driver = {
 
 static int __init hptiop_module_init(void)
 {
-	int error;
-
 	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
-
-	error = pci_register_driver(&hptiop_pci_driver);
-	if (error < 0)
-		return error;
-
-	hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
-	if (hptiop_cdev_major < 0) {
-		printk(KERN_WARNING "unable to register hptiop device.\n");
-		return hptiop_cdev_major;
-	}
-
-	return 0;
+	return pci_register_driver(&hptiop_pci_driver);
 }
 
 static void __exit hptiop_module_exit(void)
 {
-	dprintk("hptiop_module_exit\n");
-	unregister_chrdev(hptiop_cdev_major, "hptiop");
 	pci_unregister_driver(&hptiop_pci_driver);
 }
 
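
Three independent changes in hptiop.c. The reset message is now written to inbound_msgaddr0 — host-to-IOP messages go through the inbound register, so the old write to outbound_msgaddr0 was a bug. The private chardev ioctl interface (hptiop_cdev_*) and the global HBA list that backed it are deleted outright. And scsi_add_host() moves from early in probe to after hptiop_initialize_iop(), so the midlayer can never see a host whose controller is not yet operational; the remove_scsi_host unwind label disappears with it. A sketch of the ordering rule the probe change enforces, with hypothetical foo_* names:

	static int __devinit foo_probe(struct pci_dev *pdev,
				       const struct pci_device_id *id)
	{
		struct Scsi_Host *host;
		int err;

		host = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
		if (!host)
			return -ENOMEM;

		err = foo_init_hardware(host, pdev); /* IRQ, rings, firmware */
		if (err)
			goto put_host;

		err = scsi_add_host(host, &pdev->dev); /* now midlayer-visible */
		if (err)
			goto fini_hw;

		scsi_scan_host(host);
		return 0;

	fini_hw:
		foo_fini_hardware(host);
	put_host:
		scsi_host_put(host);
		return err;
	}
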
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 115f55471ed3..2be1dc5d852a 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -17,7 +17,6 @@
 
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -760,7 +759,7 @@ static int device_inquiry(int host_index, int ldn)
 	while (!got_interrupt(host_index))
 		barrier();
 
-	/*if command succesful, break */
+	/*if command successful, break */
 	if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
 		return 1;
 	}
@@ -885,7 +884,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
 	while (!got_interrupt(host_index))
 		barrier();
 
-	/*if command succesful, break */
+	/*if command successful, break */
 	if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 		return 1;
 	}
@@ -921,7 +920,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
 			return 2;
 		} else
 			global_command_error_excuse = 0;
-		/*if command succesful, break */
+		/*if command successful, break */
 		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
@@ -959,7 +958,7 @@ static int immediate_reset(int host_index, unsigned int ldn)
 			/* did not work, finish */
 			return 1;
 		}
-		/*if command succesful, break */
+		/*if command successful, break */
 		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
@@ -1511,7 +1510,7 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
 #endif
 
 	/* get interrupt request level */
-	if (request_irq(IM_IRQ, interrupt_handler, SA_SHIRQ, "ibmmcascsi", hosts)) {
+	if (request_irq(IM_IRQ, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
 		printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ);
 		return 0;
 	} else
@@ -1636,7 +1635,7 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
 		/* IRQ11 is used by SCSI-2 F/W Adapter/A */
 		printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
 		/* get interrupt request level */
-		if (request_irq(IM_IRQ_FW, interrupt_handler, SA_SHIRQ, "ibmmcascsi", hosts)) {
+		if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
 			printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
 		} else
 			IRQ11_registered++;
@@ -1697,7 +1696,7 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
 		/* IRQ11 is used by SCSI-2 F/W Adapter/A */
 		printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
 		/* get interrupt request level */
-		if (request_irq(IM_IRQ_FW, interrupt_handler, SA_SHIRQ, "ibmmcascsi", hosts))
+		if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts))
 			printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
 		else
 			IRQ11_registered++;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 944fc1203ebd..669ea4fff166 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -535,6 +535,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 				   struct ibmvscsi_host_data *hostdata)
 {
 	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+	int request_status;
 	int rc;
 
 	/* If we have exhausted our request limit, just fail this request.
@@ -542,9 +543,18 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	 * (such as task management requests) that the mid layer may think we
 	 * can handle more requests (can_queue) when we actually can't
 	 */
-	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
-	    (atomic_dec_if_positive(&hostdata->request_limit) < 0))
-		goto send_error;
+	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
+		request_status =
+			atomic_dec_if_positive(&hostdata->request_limit);
+		/* If request limit was -1 when we started, it is now even
+		 * less than that
+		 */
+		if (request_status < -1)
+			goto send_error;
+		/* Otherwise, if we have run out of requests */
+		else if (request_status < 0)
+			goto send_busy;
+	}
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
@@ -567,11 +577,23 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 	return 0;
 
- send_error:
+ send_busy:
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
 	free_event_struct(&hostdata->pool, evt_struct);
 	return SCSI_MLQUEUE_HOST_BUSY;
+
+ send_error:
+	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
+
+	if (evt_struct->cmnd != NULL) {
+		evt_struct->cmnd->result = DID_ERROR << 16;
+		evt_struct->cmnd_done(evt_struct->cmnd);
+	} else if (evt_struct->done)
+		evt_struct->done(evt_struct);
+
+	free_event_struct(&hostdata->pool, evt_struct);
+	return 0;
 }
 
 /**
@@ -1184,27 +1206,37 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		return;
 	case 0xFF:	/* Hypervisor telling us the connection is closed */
 		scsi_block_requests(hostdata->host);
+		atomic_set(&hostdata->request_limit, 0);
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
 			printk(KERN_INFO
 			       "ibmvscsi: Re-enabling adapter!\n");
-			atomic_set(&hostdata->request_limit, -1);
 			purge_requests(hostdata, DID_REQUEUE);
-			if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
-							hostdata) == 0)
-				if (ibmvscsi_send_crq(hostdata,
-						      0xC001000000000000LL, 0))
+			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
+							 hostdata) == 0) ||
+			    (ibmvscsi_send_crq(hostdata,
+					       0xC001000000000000LL, 0))) {
+				atomic_set(&hostdata->request_limit,
+					   -1);
 				printk(KERN_ERR
-				       "ibmvscsi: transmit error after"
+				       "ibmvscsi: error after"
 				       " enable\n");
+			}
 		} else {
 			printk(KERN_INFO
 			       "ibmvscsi: Virtual adapter failed rc %d!\n",
 			       crq->format);
 
-			atomic_set(&hostdata->request_limit, -1);
 			purge_requests(hostdata, DID_ERROR);
-			ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
+						      hostdata)) ||
+			    (ibmvscsi_send_crq(hostdata,
+					       0xC001000000000000LL, 0))) {
+				atomic_set(&hostdata->request_limit,
+					   -1);
+				printk(KERN_ERR
+				       "ibmvscsi: error after reset\n");
+			}
 		}
 		scsi_unblock_requests(hostdata->host);
 		return;
@@ -1467,6 +1499,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct Scsi_Host *host;
 	struct device *dev = &vdev->dev;
 	unsigned long wait_switch = 0;
+	int rc;
 
 	vdev->dev.driver_data = NULL;
 
@@ -1484,8 +1517,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
 
-	if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
-				    max_requests) != 0) {
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	if (rc != 0 && rc != H_RESOURCE) {
 		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
 		goto init_crq_failed;
 	}
@@ -1505,7 +1538,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	 * to fail if the other end is not acive.  In that case we don't
 	 * want to scan
 	 */
-	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+			|| rc == H_RESOURCE) {
 		/*
 		 * Wait around max init_timeout secs for the adapter to finish
 		 * initializing. When we are done initializing, we will have a
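
The queueing fix above hinges on atomic_dec_if_positive(), which returns the decremented value without storing it when the result would go negative: with request_limit at 0 (out of slots) it returns -1, and with request_limit already at -1 (connection marked dead) it returns -2. That lets the driver return SCSI_MLQUEUE_HOST_BUSY for the transient case so the midlayer retries, while failing commands with DID_ERROR once the link is really down. A condensed sketch of the distinction (foo_reserve_slot is hypothetical):

	static int foo_reserve_slot(atomic_t *request_limit)
	{
		int status = atomic_dec_if_positive(request_limit);

		if (status < -1)	/* limit was already negative: dead */
			return -EIO;
		if (status < 0)		/* limit was 0: transient, retry */
			return SCSI_MLQUEUE_HOST_BUSY;
		return 0;		/* slot reserved */
	}
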
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 7eed0b098171..6aeb5f003c3c 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	int rc;
 
 	single_host_data = hostdata;
-	rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0);
+	rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
 	if (rc < 0) {
 		printk("viopath_open failed with rc %d in open_event_path\n",
 		       rc);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 1a9992bdfef8..01b8ac641eb8 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -156,8 +156,8 @@ static void gather_partition_info(void)
156{ 156{
157 struct device_node *rootdn; 157 struct device_node *rootdn;
158 158
159 char *ppartition_name; 159 const char *ppartition_name;
160 unsigned int *p_number_ptr; 160 const unsigned int *p_number_ptr;
161 161
162 /* Retrieve information about this partition */ 162 /* Retrieve information about this partition */
163 rootdn = find_path_device("/"); 163 rootdn = find_path_device("/");
@@ -165,14 +165,11 @@ static void gather_partition_info(void)
165 return; 165 return;
166 } 166 }
167 167
168 ppartition_name = 168 ppartition_name = get_property(rootdn, "ibm,partition-name", NULL);
169 get_property(rootdn, "ibm,partition-name", NULL);
170 if (ppartition_name) 169 if (ppartition_name)
171 strncpy(partition_name, ppartition_name, 170 strncpy(partition_name, ppartition_name,
172 sizeof(partition_name)); 171 sizeof(partition_name));
173 p_number_ptr = 172 p_number_ptr = get_property(rootdn, "ibm,partition-no", NULL);
174 (unsigned int *)get_property(rootdn, "ibm,partition-no",
175 NULL);
176 if (p_number_ptr) 173 if (p_number_ptr)
177 partition_number = *p_number_ptr; 174 partition_number = *p_number_ptr;
178} 175}
@@ -208,6 +205,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
208 int max_requests) 205 int max_requests)
209{ 206{
210 int rc; 207 int rc;
208 int retrc;
211 struct vio_dev *vdev = to_vio_dev(hostdata->dev); 209 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
212 210
213 queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); 211 queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
@@ -226,7 +224,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
226 gather_partition_info(); 224 gather_partition_info();
227 set_adapter_info(hostdata); 225 set_adapter_info(hostdata);
228 226
229 rc = plpar_hcall_norets(H_REG_CRQ, 227 retrc = rc = plpar_hcall_norets(H_REG_CRQ,
230 vdev->unit_address, 228 vdev->unit_address,
231 queue->msg_token, PAGE_SIZE); 229 queue->msg_token, PAGE_SIZE);
232 if (rc == H_RESOURCE) 230 if (rc == H_RESOURCE)
@@ -237,6 +235,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
237 if (rc == 2) { 235 if (rc == 2) {
238 /* Adapter is good, but other end is not ready */ 236 /* Adapter is good, but other end is not ready */
239 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 237 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
238 retrc = 0;
240 } else if (rc != 0) { 239 } else if (rc != 0) {
241 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); 240 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
242 goto reg_crq_failed; 241 goto reg_crq_failed;
@@ -263,7 +262,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
263 tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task, 262 tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
264 (unsigned long)hostdata); 263 (unsigned long)hostdata);
265 264
266 return 0; 265 return retrc;
267 266
268 req_irq_failed: 267 req_irq_failed:
269 do { 268 do {
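
Taken together with the hunk at the top of this section, the rpa_vscsi.c change lets ibmvscsi tell a CRQ that was re-registered after a kexec (H_REG_CRQ returning H_RESOURCE) apart from a clean registration: the original hcall status is saved in retrc while local recovery runs, "partner not ready" is mapped back to success, and the caller uses the returned status to decide whether to send the CRQ initialization message anyway. A minimal user-space sketch of that pattern follows; the constants and helpers are stand-ins, not the driver's API.

    /* Minimal sketch of the retrc pattern: remember the first status
     * from registration so the caller can tell a kexec takeover
     * (H_RESOURCE) from a clean start, while still recovering locally.
     * register_queue() and reset_queue() are pretend hcalls.
     */
    #include <stdio.h>

    #define H_SUCCESS   0
    #define H_CLOSED    2   /* partner not ready */
    #define H_RESOURCE  16  /* already registered, e.g. after kexec */

    static int register_queue(void) { return H_RESOURCE; }
    static int reset_queue(void)    { return H_SUCCESS;  }

    static int init_queue(void)
    {
        int rc, retrc;

        retrc = rc = register_queue();
        if (rc == H_RESOURCE)
            rc = reset_queue();     /* recover, but keep retrc */
        if (rc == H_CLOSED)
            retrc = H_SUCCESS;      /* partner not ready is not an error */
        else if (rc != H_SUCCESS)
            return rc;              /* hard failure */

        return retrc;               /* caller sees H_RESOURCE on takeover */
    }

    int main(void)
    {
        printf("init_queue() = %d\n", init_queue());
        return 0;
    }

Returning the original status instead of collapsing it to a boolean is what allows the `|| rc == H_RESOURCE` test in the first hunk to work.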
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 988e6f7af01a..94d1de55607f 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -34,7 +34,6 @@
34#define IDESCSI_VERSION "0.92" 34#define IDESCSI_VERSION "0.92"
35 35
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/config.h>
38#include <linux/types.h> 37#include <linux/types.h>
39#include <linux/string.h> 38#include <linux/string.h>
40#include <linux/kernel.h> 39#include <linux/kernel.h>
@@ -518,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
518 /* No more interrupts */ 517 /* No more interrupts */
519 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
520 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); 519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
521 local_irq_enable(); 520 local_irq_enable_in_hardirq();
522 if (status.b.check) 521 if (status.b.check)
523 rq->errors++; 522 rq->errors++;
524 idescsi_end_request (drive, 1, 0); 523 idescsi_end_request (drive, 1, 0);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index cd2dffdab77a..2d95ac9c32c1 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -3,15 +3,11 @@
3 * 3 *
4 * (The IMM is the embedded controller in the ZIP Plus drive.) 4 * (The IMM is the embedded controller in the ZIP Plus drive.)
5 * 5 *
6 * Current Maintainer: David Campbell (Perth, Western Australia)
7 * campbell@torque.net
8 *
9 * My unoffical company acronym list is 21 pages long: 6 * My unoffical company acronym list is 21 pages long:
10 * FLA: Four letter acronym with built in facility for 7 * FLA: Four letter acronym with built in facility for
11 * future expansion to five letters. 8 * future expansion to five letters.
12 */ 9 */
13 10
14#include <linux/config.h>
15#include <linux/init.h> 11#include <linux/init.h>
16#include <linux/kernel.h> 12#include <linux/kernel.h>
17#include <linux/module.h> 13#include <linux/module.h>
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index dc3aebf0e365..ece936ac29c7 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -2,7 +2,7 @@
2/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in 2/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in
3 * the Iomega ZIP Plus drive 3 * the Iomega ZIP Plus drive
4 * 4 *
5 * (c) 1998 David Campbell campbell@torque.net 5 * (c) 1998 David Campbell
6 * 6 *
7 * Please note that I live in Perth, Western Australia. GMT+0800 7 * Please note that I live in Perth, Western Australia. GMT+0800
8 */ 8 */
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 883bc92b4d9a..59a4097f1254 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2015,7 +2015,7 @@ static int __init in2000_detect(struct scsi_host_template * tpnt)
2015 write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */ 2015 write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
2016 write1_io(0, IO_INTR_MASK); /* allow all ints */ 2016 write1_io(0, IO_INTR_MASK); /* allow all ints */
2017 x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT]; 2017 x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
2018 if (request_irq(x, in2000_intr, SA_INTERRUPT, "in2000", instance)) { 2018 if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
2019 printk("in2000_detect: Unable to allocate IRQ.\n"); 2019 printk("in2000_detect: Unable to allocate IRQ.\n");
2020 detect_count--; 2020 detect_count--;
2021 continue; 2021 continue;
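
The in2000 change is part of the tree-wide rename of the SA_* interrupt flags to IRQF_* (SA_INTERRUPT becomes IRQF_DISABLED, SA_SHIRQ becomes IRQF_SHARED); the semantics are unchanged. A hedged sketch of a converted registration, assuming the three-argument handler signature these drivers still use at this point in the series:

    /* Sketch of the flag rename; only the names move from SA_* to
     * IRQF_*.  Handler and device names here are invented.
     */
    #include <linux/interrupt.h>

    static irqreturn_t sketch_intr(int irq, void *dev_id, struct pt_regs *regs)
    {
        return IRQ_HANDLED;
    }

    static int sketch_attach(unsigned int irq, void *dev)
    {
        /* was: request_irq(irq, sketch_intr, SA_INTERRUPT | SA_SHIRQ, ...) */
        return request_irq(irq, sketch_intr, IRQF_DISABLED | IRQF_SHARED,
                           "sketch", dev);
    }

The same mechanical conversion appears below in initio.c, ipr.c, and ips.c.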
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 913ba95f85bd..9e10dac61cfd 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -118,7 +118,6 @@
118#include <linux/blkdev.h> 118#include <linux/blkdev.h>
119#include <linux/spinlock.h> 119#include <linux/spinlock.h>
120#include <linux/stat.h> 120#include <linux/stat.h>
121#include <linux/config.h>
122#include <linux/kernel.h> 121#include <linux/kernel.h>
123#include <linux/proc_fs.h> 122#include <linux/proc_fs.h>
124#include <linux/string.h> 123#include <linux/string.h>
@@ -2868,7 +2867,7 @@ static int i91u_detect(struct scsi_host_template * tpnt)
2868 hreg->sg_tablesize = TOTAL_SG_ENTRY; /* Maximun support is 32 */ 2867 hreg->sg_tablesize = TOTAL_SG_ENTRY; /* Maximun support is 32 */
2869 2868
2870 /* Initial tulip chip */ 2869 /* Initial tulip chip */
2871 ok = request_irq(pHCB->HCS_Intr, i91u_intr, SA_INTERRUPT | SA_SHIRQ, "i91u", hreg); 2870 ok = request_irq(pHCB->HCS_Intr, i91u_intr, IRQF_DISABLED | IRQF_SHARED, "i91u", hreg);
2872 if (ok < 0) { 2871 if (ok < 0) {
2873 printk(KERN_WARNING "i91u: unable to request IRQ %d\n\n", pHCB->HCS_Intr); 2872 printk(KERN_WARNING "i91u: unable to request IRQ %d\n\n", pHCB->HCS_Intr);
2874 return 0; 2873 return 0;
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index 3efb1184fc39..acb67a4af2cc 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -54,7 +54,6 @@
54 **************************************************************************/ 54 **************************************************************************/
55 55
56 56
57#include <linux/config.h>
58#include <linux/types.h> 57#include <linux/types.h>
59 58
60#define ULONG unsigned long 59#define ULONG unsigned long
@@ -193,13 +192,13 @@ typedef struct {
193#define TSC_SEL_ATN_DIRECT_OUT 0x15 /* Select With ATN Sequence */ 192#define TSC_SEL_ATN_DIRECT_OUT 0x15 /* Select With ATN Sequence */
194#define TSC_SEL_ATN3_DIRECT_IN 0xB5 /* Select With ATN3 Sequence */ 193#define TSC_SEL_ATN3_DIRECT_IN 0xB5 /* Select With ATN3 Sequence */
195#define TSC_SEL_ATN3_DIRECT_OUT 0x35 /* Select With ATN3 Sequence */ 194#define TSC_SEL_ATN3_DIRECT_OUT 0x35 /* Select With ATN3 Sequence */
196#define TSC_XF_DMA_OUT_DIRECT 0x06 /* DMA Xfer Infomation out */ 195#define TSC_XF_DMA_OUT_DIRECT 0x06 /* DMA Xfer Information out */
197#define TSC_XF_DMA_IN_DIRECT 0x86 /* DMA Xfer Infomation in */ 196#define TSC_XF_DMA_IN_DIRECT 0x86 /* DMA Xfer Information in */
198 197
199#define TSC_XF_DMA_OUT 0x43 /* DMA Xfer Infomation out */ 198#define TSC_XF_DMA_OUT 0x43 /* DMA Xfer Information out */
200#define TSC_XF_DMA_IN 0xC3 /* DMA Xfer Infomation in */ 199#define TSC_XF_DMA_IN 0xC3 /* DMA Xfer Information in */
201#define TSC_XF_FIFO_OUT 0x03 /* FIFO Xfer Infomation out */ 200#define TSC_XF_FIFO_OUT 0x03 /* FIFO Xfer Information out */
202#define TSC_XF_FIFO_IN 0x83 /* FIFO Xfer Infomation in */ 201#define TSC_XF_FIFO_IN 0x83 /* FIFO Xfer Information in */
203 202
204#define TSC_MSG_ACCEPT 0x0F /* Message Accept */ 203#define TSC_MSG_ACCEPT 0x0F /* Message Accept */
205 204
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 96b65b307dd0..7ed4eef8347b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -54,7 +54,6 @@
54 * 54 *
55 */ 55 */
56 56
57#include <linux/config.h>
58#include <linux/fs.h> 57#include <linux/fs.h>
59#include <linux/init.h> 58#include <linux/init.h>
60#include <linux/types.h> 59#include <linux/types.h>
@@ -176,6 +175,8 @@ struct ipr_error_table_t ipr_error_table[] = {
176 "Qualified success"}, 175 "Qualified success"},
177 {0x01080000, 1, 1, 176 {0x01080000, 1, 1,
178 "FFFE: Soft device bus error recovered by the IOA"}, 177 "FFFE: Soft device bus error recovered by the IOA"},
178 {0x01088100, 0, 1,
179 "4101: Soft device bus fabric error"},
179 {0x01170600, 0, 1, 180 {0x01170600, 0, 1,
180 "FFF9: Device sector reassign successful"}, 181 "FFF9: Device sector reassign successful"},
181 {0x01170900, 0, 1, 182 {0x01170900, 0, 1,
@@ -226,6 +227,8 @@ struct ipr_error_table_t ipr_error_table[] = {
226 "3109: IOA timed out a device command"}, 227 "3109: IOA timed out a device command"},
227 {0x04088000, 0, 0, 228 {0x04088000, 0, 0,
228 "3120: SCSI bus is not operational"}, 229 "3120: SCSI bus is not operational"},
230 {0x04088100, 0, 1,
231 "4100: Hard device bus fabric error"},
229 {0x04118000, 0, 1, 232 {0x04118000, 0, 1,
230 "9000: IOA reserved area data check"}, 233 "9000: IOA reserved area data check"},
231 {0x04118100, 0, 1, 234 {0x04118100, 0, 1,
@@ -274,6 +277,14 @@ struct ipr_error_table_t ipr_error_table[] = {
274 "9091: Incorrect hardware configuration change has been detected"}, 277 "9091: Incorrect hardware configuration change has been detected"},
275 {0x04678000, 0, 1, 278 {0x04678000, 0, 1,
276 "9073: Invalid multi-adapter configuration"}, 279 "9073: Invalid multi-adapter configuration"},
280 {0x04678100, 0, 1,
281 "4010: Incorrect connection between cascaded expanders"},
282 {0x04678200, 0, 1,
283 "4020: Connections exceed IOA design limits"},
284 {0x04678300, 0, 1,
285 "4030: Incorrect multipath connection"},
286 {0x04679000, 0, 1,
287 "4110: Unsupported enclosure function"},
277 {0x046E0000, 0, 1, 288 {0x046E0000, 0, 1,
278 "FFF4: Command to logical unit failed"}, 289 "FFF4: Command to logical unit failed"},
279 {0x05240000, 1, 0, 290 {0x05240000, 1, 0,
@@ -298,6 +309,8 @@ struct ipr_error_table_t ipr_error_table[] = {
298 "9031: Array protection temporarily suspended, protection resuming"}, 309 "9031: Array protection temporarily suspended, protection resuming"},
299 {0x06040600, 0, 1, 310 {0x06040600, 0, 1,
300 "9040: Array protection temporarily suspended, protection resuming"}, 311 "9040: Array protection temporarily suspended, protection resuming"},
312 {0x06288000, 0, 1,
313 "3140: Device bus not ready to ready transition"},
301 {0x06290000, 0, 1, 314 {0x06290000, 0, 1,
302 "FFFB: SCSI bus was reset"}, 315 "FFFB: SCSI bus was reset"},
303 {0x06290500, 0, 0, 316 {0x06290500, 0, 0,
@@ -320,6 +333,16 @@ struct ipr_error_table_t ipr_error_table[] = {
320 "3150: SCSI bus configuration error"}, 333 "3150: SCSI bus configuration error"},
321 {0x06678100, 0, 1, 334 {0x06678100, 0, 1,
322 "9074: Asymmetric advanced function disk configuration"}, 335 "9074: Asymmetric advanced function disk configuration"},
336 {0x06678300, 0, 1,
337 "4040: Incomplete multipath connection between IOA and enclosure"},
338 {0x06678400, 0, 1,
339 "4041: Incomplete multipath connection between enclosure and device"},
340 {0x06678500, 0, 1,
341 "9075: Incomplete multipath connection between IOA and remote IOA"},
342 {0x06678600, 0, 1,
343 "9076: Configuration error, missing remote IOA"},
344 {0x06679100, 0, 1,
345 "4050: Enclosure does not support a required multipath function"},
323 {0x06690200, 0, 1, 346 {0x06690200, 0, 1,
324 "9041: Array protection temporarily suspended"}, 347 "9041: Array protection temporarily suspended"},
325 {0x06698200, 0, 1, 348 {0x06698200, 0, 1,
@@ -332,6 +355,10 @@ struct ipr_error_table_t ipr_error_table[] = {
332 "9072: Link not operational transition"}, 355 "9072: Link not operational transition"},
333 {0x066B8200, 0, 1, 356 {0x066B8200, 0, 1,
334 "9032: Array exposed but still protected"}, 357 "9032: Array exposed but still protected"},
358 {0x066B9100, 0, 1,
359 "4061: Multipath redundancy level got better"},
360 {0x066B9200, 0, 1,
361 "4060: Multipath redundancy level got worse"},
335 {0x07270000, 0, 0, 362 {0x07270000, 0, 0,
336 "Failure due to other device"}, 363 "Failure due to other device"},
337 {0x07278000, 0, 1, 364 {0x07278000, 0, 1,
@@ -4100,8 +4127,7 @@ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4100{ 4127{
4101 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 4128 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4102 4129
4103 if ((be32_to_cpu(ioasa->ioasc_specific) & 4130 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4104 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4105 return 0; 4131 return 0;
4106 4132
4107 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 4133 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
@@ -4191,7 +4217,8 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4191 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 4217 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4192 break; 4218 break;
4193 default: 4219 default:
4194 scsi_cmd->result |= (DID_ERROR << 16); 4220 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4221 scsi_cmd->result |= (DID_ERROR << 16);
4195 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 4222 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4196 res->needs_sync_complete = 1; 4223 res->needs_sync_complete = 1;
4197 break; 4224 break;
@@ -6429,7 +6456,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6429 ioa_cfg->needs_hard_reset = 1; 6456 ioa_cfg->needs_hard_reset = 1;
6430 6457
6431 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 6458 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6432 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg); 6459 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
6433 6460
6434 if (rc) { 6461 if (rc) {
6435 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", 6462 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
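
Besides the new SAS-fabric and multipath error-table entries, ipr's ERP path now sets DID_ERROR only when the sense key is worse than RECOVERED_ERROR, so informational or recovered sense data no longer fails the command. The autosense check is also reduced to testing IPR_AUTOSENSE_VALID alone: masking with IPR_ADDITIONAL_STATUS_FMT as well let the old test proceed whenever either bit was set. A self-contained sketch of the gating follows; the low-byte sense-key extraction is an assumption here, not the driver's actual IPR_IOASC_SENSE_KEY() macro.

    #include <stdint.h>
    #include <stdio.h>

    #define RECOVERED_ERROR 0x01            /* SCSI sense key */
    #define DID_ERROR       0x07            /* host byte */
    #define SENSE_KEY(ioasc) ((ioasc) & 0xff) /* layout assumed */

    static uint32_t erp_result(uint32_t ioasc)
    {
        uint32_t result = 0;

        /* fail the command only when the sense key is worse than
         * RECOVERED_ERROR; recovered completions pass through */
        if (SENSE_KEY(ioasc) > RECOVERED_ERROR)
            result |= DID_ERROR << 16;
        return result;
    }

    int main(void)
    {
        printf("recovered: 0x%x, medium error: 0x%x\n",
               erp_result(0x00000001), erp_result(0x00000003));
        return 0;
    }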
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 1ad24df69d70..11eaff524327 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -36,8 +36,8 @@
36/* 36/*
37 * Literals 37 * Literals
38 */ 38 */
39#define IPR_DRIVER_VERSION "2.1.3" 39#define IPR_DRIVER_VERSION "2.1.4"
40#define IPR_DRIVER_DATE "(March 29, 2006)" 40#define IPR_DRIVER_DATE "(August 2, 2006)"
41 41
42/* 42/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -45,6 +45,7 @@
45 * This can be adjusted at runtime through sysfs device attributes. 45 * This can be adjusted at runtime through sysfs device attributes.
46 */ 46 */
47#define IPR_MAX_CMD_PER_LUN 6 47#define IPR_MAX_CMD_PER_LUN 6
48#define IPR_MAX_CMD_PER_ATA_LUN 1
48 49
49/* 50/*
50 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of 51 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
@@ -106,7 +107,7 @@
106#define IPR_IOA_BUS 0xff 107#define IPR_IOA_BUS 0xff
107#define IPR_IOA_TARGET 0xff 108#define IPR_IOA_TARGET 0xff
108#define IPR_IOA_LUN 0xff 109#define IPR_IOA_LUN 0xff
109#define IPR_MAX_NUM_BUSES 8 110#define IPR_MAX_NUM_BUSES 16
110#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES 111#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES
111 112
112#define IPR_NUM_RESET_RELOAD_RETRIES 3 113#define IPR_NUM_RESET_RELOAD_RETRIES 3
@@ -145,6 +146,7 @@
145#define IPR_LUN_RESET 0x40 146#define IPR_LUN_RESET 0x40
146#define IPR_TARGET_RESET 0x20 147#define IPR_TARGET_RESET 0x20
147#define IPR_BUS_RESET 0x10 148#define IPR_BUS_RESET 0x10
149#define IPR_ATA_PHY_RESET 0x80
148#define IPR_ID_HOST_RR_Q 0xC4 150#define IPR_ID_HOST_RR_Q 0xC4
149#define IPR_QUERY_IOA_CONFIG 0xC5 151#define IPR_QUERY_IOA_CONFIG 0xC5
150#define IPR_CANCEL_ALL_REQUESTS 0xCE 152#define IPR_CANCEL_ALL_REQUESTS 0xCE
@@ -295,7 +297,11 @@ struct ipr_std_inq_data {
295}__attribute__ ((packed)); 297}__attribute__ ((packed));
296 298
297struct ipr_config_table_entry { 299struct ipr_config_table_entry {
298 u8 service_level; 300 u8 proto;
301#define IPR_PROTO_SATA 0x02
302#define IPR_PROTO_SATA_ATAPI 0x03
303#define IPR_PROTO_SAS_STP 0x06
304#define IPR_PROTO_SAS_STP_ATAPI 0x07
299 u8 array_id; 305 u8 array_id;
300 u8 flags; 306 u8 flags;
301#define IPR_IS_IOA_RESOURCE 0x80 307#define IPR_IS_IOA_RESOURCE 0x80
@@ -307,6 +313,7 @@ struct ipr_config_table_entry {
307#define IPR_SUBTYPE_AF_DASD 0 313#define IPR_SUBTYPE_AF_DASD 0
308#define IPR_SUBTYPE_GENERIC_SCSI 1 314#define IPR_SUBTYPE_GENERIC_SCSI 1
309#define IPR_SUBTYPE_VOLUME_SET 2 315#define IPR_SUBTYPE_VOLUME_SET 2
316#define IPR_SUBTYPE_GENERIC_ATA 4
310 317
311#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4) 318#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4)
312#define IPR_QUEUE_FROZEN_MODEL 0 319#define IPR_QUEUE_FROZEN_MODEL 0
@@ -350,6 +357,7 @@ struct ipr_cmd_pkt {
350#define IPR_RQTYPE_SCSICDB 0x00 357#define IPR_RQTYPE_SCSICDB 0x00
351#define IPR_RQTYPE_IOACMD 0x01 358#define IPR_RQTYPE_IOACMD 0x01
352#define IPR_RQTYPE_HCAM 0x02 359#define IPR_RQTYPE_HCAM 0x02
360#define IPR_RQTYPE_ATA_PASSTHRU 0x04
353 361
354 u8 luntar_luntrn; 362 u8 luntar_luntrn;
355 363
@@ -373,6 +381,37 @@ struct ipr_cmd_pkt {
373 __be16 timeout; 381 __be16 timeout;
374}__attribute__ ((packed, aligned(4))); 382}__attribute__ ((packed, aligned(4)));
375 383
384struct ipr_ioarcb_ata_regs {
385 u8 flags;
386#define IPR_ATA_FLAG_PACKET_CMD 0x80
387#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
388#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
389 u8 reserved[3];
390
391 __be16 data;
392 u8 feature;
393 u8 nsect;
394 u8 lbal;
395 u8 lbam;
396 u8 lbah;
397 u8 device;
398 u8 command;
399 u8 reserved2[3];
400 u8 hob_feature;
401 u8 hob_nsect;
402 u8 hob_lbal;
403 u8 hob_lbam;
404 u8 hob_lbah;
405 u8 ctl;
406}__attribute__ ((packed, aligned(4)));
407
408struct ipr_ioarcb_add_data {
409 union {
410 struct ipr_ioarcb_ata_regs regs;
411 __be32 add_cmd_parms[10];
412 }u;
413}__attribute__ ((packed, aligned(4)));
414
376/* IOA Request Control Block 128 bytes */ 415/* IOA Request Control Block 128 bytes */
377struct ipr_ioarcb { 416struct ipr_ioarcb {
378 __be32 ioarcb_host_pci_addr; 417 __be32 ioarcb_host_pci_addr;
@@ -397,7 +436,7 @@ struct ipr_ioarcb {
397 struct ipr_cmd_pkt cmd_pkt; 436 struct ipr_cmd_pkt cmd_pkt;
398 437
399 __be32 add_cmd_parms_len; 438 __be32 add_cmd_parms_len;
400 __be32 add_cmd_parms[10]; 439 struct ipr_ioarcb_add_data add_data;
401}__attribute__((packed, aligned (4))); 440}__attribute__((packed, aligned (4)));
402 441
403struct ipr_ioadl_desc { 442struct ipr_ioadl_desc {
@@ -433,6 +472,21 @@ struct ipr_ioasa_gpdd {
433 __be32 ioa_data[2]; 472 __be32 ioa_data[2];
434}__attribute__((packed, aligned (4))); 473}__attribute__((packed, aligned (4)));
435 474
475struct ipr_ioasa_gata {
476 u8 error;
477 u8 nsect; /* Interrupt reason */
478 u8 lbal;
479 u8 lbam;
480 u8 lbah;
481 u8 device;
482 u8 status;
483 u8 alt_status; /* ATA CTL */
484 u8 hob_nsect;
485 u8 hob_lbal;
486 u8 hob_lbam;
487 u8 hob_lbah;
488}__attribute__((packed, aligned (4)));
489
436struct ipr_auto_sense { 490struct ipr_auto_sense {
437 __be16 auto_sense_len; 491 __be16 auto_sense_len;
438 __be16 ioa_data_len; 492 __be16 ioa_data_len;
@@ -466,6 +520,7 @@ struct ipr_ioasa {
466 __be32 ioasc_specific; /* status code specific field */ 520 __be32 ioasc_specific; /* status code specific field */
467#define IPR_ADDITIONAL_STATUS_FMT 0x80000000 521#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
468#define IPR_AUTOSENSE_VALID 0x40000000 522#define IPR_AUTOSENSE_VALID 0x40000000
523#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
469#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff 524#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
470#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) 525#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
471#define IPR_FIELD_POINTER_MASK 0x0000ffff 526#define IPR_FIELD_POINTER_MASK 0x0000ffff
@@ -474,6 +529,7 @@ struct ipr_ioasa {
474 struct ipr_ioasa_vset vset; 529 struct ipr_ioasa_vset vset;
475 struct ipr_ioasa_af_dasd dasd; 530 struct ipr_ioasa_af_dasd dasd;
476 struct ipr_ioasa_gpdd gpdd; 531 struct ipr_ioasa_gpdd gpdd;
532 struct ipr_ioasa_gata gata;
477 } u; 533 } u;
478 534
479 struct ipr_auto_sense auto_sense; 535 struct ipr_auto_sense auto_sense;
@@ -1308,6 +1364,22 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1308} 1364}
1309 1365
1310/** 1366/**
1367 * ipr_is_gata - Determine if a resource is a generic ATA resource
1368 * @res: resource entry struct
1369 *
1370 * Return value:
1371 * 1 if GATA / 0 if not GATA
1372 **/
1373static inline int ipr_is_gata(struct ipr_resource_entry *res)
1374{
1375 if (!ipr_is_ioa_resource(res) &&
1376 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
1377 return 1;
1378 else
1379 return 0;
1380}
1381
1382/**
1311 * ipr_is_naca_model - Determine if a resource is using NACA queueing model 1383 * ipr_is_naca_model - Determine if a resource is using NACA queueing model
1312 * @res: resource entry struct 1384 * @res: resource entry struct
1313 * 1385 *
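
The ipr.h additions lay the groundwork for SATA devices behind the IOA: new protocol codes, a GENERIC_ATA resource subtype, and an ATA taskfile view of the IOARCB's ten additional parameter words through the new ipr_ioarcb_add_data union. A sketch of that overlay with standard types; the field layout mirrors the patch, while the compile-time size check is an addition.

    #include <stdint.h>

    struct ata_regs {
        uint8_t  flags;
        uint8_t  reserved[3];
        uint16_t data;
        uint8_t  feature, nsect, lbal, lbam, lbah, device, command;
        uint8_t  reserved2[3];
        uint8_t  hob_feature, hob_nsect, hob_lbal, hob_lbam, hob_lbah, ctl;
    } __attribute__((packed, aligned(4)));

    struct add_data {
        union {
            struct ata_regs regs;          /* taskfile view */
            uint32_t        add_cmd_parms[10]; /* opaque view */
        } u;
    } __attribute__((packed, aligned(4)));

    /* the taskfile view must fit inside the ten parameter words */
    typedef char tail_fits[sizeof(struct ata_regs) <= 40 ? 1 : -1];

Because the union replaces the old bare add_cmd_parms array in struct ipr_ioarcb, existing users keep their layout while ATA passthrough gets typed access to the same bytes.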
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 5353b28b2939..3c639286ec1e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -196,7 +196,6 @@
196#include <linux/module.h> 196#include <linux/module.h>
197 197
198#include <linux/stat.h> 198#include <linux/stat.h>
199#include <linux/config.h>
200 199
201#include <linux/spinlock.h> 200#include <linux/spinlock.h>
202#include <linux/init.h> 201#include <linux/init.h>
@@ -6438,7 +6437,7 @@ ips_erase_bios(ips_ha_t * ha)
6438 /* VPP failure */ 6437 /* VPP failure */
6439 return (1); 6438 return (1);
6440 6439
6441 /* check for succesful flash */ 6440 /* check for successful flash */
6442 if (status & 0x30) 6441 if (status & 0x30)
6443 /* sequence error */ 6442 /* sequence error */
6444 return (1); 6443 return (1);
@@ -6550,7 +6549,7 @@ ips_erase_bios_memio(ips_ha_t * ha)
6550 /* VPP failure */ 6549 /* VPP failure */
6551 return (1); 6550 return (1);
6552 6551
6553 /* check for succesful flash */ 6552 /* check for successful flash */
6554 if (status & 0x30) 6553 if (status & 0x30)
6555 /* sequence error */ 6554 /* sequence error */
6556 return (1); 6555 return (1);
@@ -7008,7 +7007,7 @@ ips_register_scsi(int index)
7008 memcpy(ha, oldha, sizeof (ips_ha_t)); 7007 memcpy(ha, oldha, sizeof (ips_ha_t));
7009 free_irq(oldha->irq, oldha); 7008 free_irq(oldha->irq, oldha);
7010 /* Install the interrupt handler with the new ha */ 7009 /* Install the interrupt handler with the new ha */
7011 if (request_irq(ha->irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { 7010 if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7012 IPS_PRINTK(KERN_WARNING, ha->pcidev, 7011 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7013 "Unable to install interrupt handler\n"); 7012 "Unable to install interrupt handler\n");
7014 scsi_host_put(sh); 7013 scsi_host_put(sh);
@@ -7420,7 +7419,7 @@ ips_init_phase2(int index)
7420 } 7419 }
7421 7420
7422 /* Install the interrupt handler */ 7421 /* Install the interrupt handler */
7423 if (request_irq(ha->irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { 7422 if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7424 IPS_PRINTK(KERN_WARNING, ha->pcidev, 7423 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7425 "Unable to install interrupt handler\n"); 7424 "Unable to install interrupt handler\n");
7426 return ips_abort_init(ha, index); 7425 return ips_abort_init(ha, index);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b4743a9ecc80..0a9dbc59663f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -43,13 +43,10 @@
43 43
44#include "iscsi_tcp.h" 44#include "iscsi_tcp.h"
45 45
46#define ISCSI_TCP_VERSION "1.0-595"
47
48MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " 46MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>"); 47 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path"); 48MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
52MODULE_VERSION(ISCSI_TCP_VERSION);
53/* #define DEBUG_TCP */ 50/* #define DEBUG_TCP */
54#define DEBUG_ASSERT 51#define DEBUG_ASSERT
55 52
@@ -111,8 +108,8 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
111{ 108{
112 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 109 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
113 110
114 crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); 111 crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
115 buf->sg.length += sizeof(uint32_t); 112 buf->sg.length = tcp_conn->hdr_size;
116} 113}
117 114
118static inline int 115static inline int
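
Throughout this file the old crypto_digest_* calls on a bare tfm are converted to the crypto_hash_* interface, which operates on a struct hash_desc and takes an explicit byte count; note the header-digest helper now also restores buf->sg.length to the header size rather than growing it. A hedged sketch of the 2.6.19-era API: "crc32c" matches what iscsi_tcp asks for, but per-call allocation and the missing error handling are for brevity only (the driver allocates its tx/rx transforms once per connection).

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    static void digest_buffer(void *buf, unsigned int len, u8 *out)
    {
        struct hash_desc desc;
        struct scatterlist sg;

        desc.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
        desc.flags = 0;

        sg_init_one(&sg, buf, len);
        crypto_hash_digest(&desc, &sg, len, out); /* init + update + final */

        crypto_free_hash(desc.tfm);
    }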
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
185 * must be called with session lock 182 * must be called with session lock
186 */ 183 */
187static void 184static void
188__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 185iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
189{ 186{
190 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 187 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
188 struct iscsi_r2t_info *r2t;
191 struct scsi_cmnd *sc; 189 struct scsi_cmnd *sc;
192 190
191 /* flush ctask's r2t queues */
192 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
193 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
194 sizeof(void*));
195 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
196 }
197
193 sc = ctask->sc; 198 sc = ctask->sc;
194 if (unlikely(!sc)) 199 if (unlikely(!sc))
195 return; 200 return;
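
Task cleanup now drains any R2Ts still queued on the command back into the per-task R2T pool, so a command torn down mid-sequence no longer leaks pool entries. The pointers are stored by value in the fifos, sizeof(void *) bytes each; a sketch of the drain loop with stand-in parameters for tcp_ctask->r2tqueue and tcp_ctask->r2tpool.queue:

    #include <linux/kfifo.h>

    static void flush_r2ts(struct kfifo *queue, struct kfifo *pool)
    {
        void *r2t;

        /* pop each pending R2T pointer and return it to the pool */
        while (__kfifo_get(queue, (void *)&r2t, sizeof(void *)))
            __kfifo_put(pool, (void *)&r2t, sizeof(void *));
    }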
@@ -276,7 +281,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
276{ 281{
277 struct iscsi_data *hdr; 282 struct iscsi_data *hdr;
278 struct scsi_cmnd *sc = ctask->sc; 283 struct scsi_cmnd *sc = ctask->sc;
279 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
280 284
281 hdr = &r2t->dtask.hdr; 285 hdr = &r2t->dtask.hdr;
282 memset(hdr, 0, sizeof(struct iscsi_data)); 286 memset(hdr, 0, sizeof(struct iscsi_data));
@@ -331,10 +335,12 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
331 sg_count += sg->length; 335 sg_count += sg->length;
332 } 336 }
333 BUG_ON(r2t->sg == NULL); 337 BUG_ON(r2t->sg == NULL);
334 } else 338 } else {
335 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 339 iscsi_buf_init_iov(&r2t->sendbuf,
336 (char*)sc->request_buffer + r2t->data_offset, 340 (char*)sc->request_buffer + r2t->data_offset,
337 r2t->data_count); 341 r2t->data_count);
342 r2t->sg = NULL;
343 }
338} 344}
339 345
340/** 346/**
@@ -353,8 +359,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
353 int r2tsn = be32_to_cpu(rhdr->r2tsn); 359 int r2tsn = be32_to_cpu(rhdr->r2tsn);
354 int rc; 360 int rc;
355 361
356 if (tcp_conn->in.datalen) 362 if (tcp_conn->in.datalen) {
363 printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
364 tcp_conn->in.datalen);
357 return ISCSI_ERR_DATALEN; 365 return ISCSI_ERR_DATALEN;
366 }
358 367
359 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn) 368 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
360 return ISCSI_ERR_R2TSN; 369 return ISCSI_ERR_R2TSN;
@@ -374,20 +383,29 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
374 spin_unlock(&session->lock); 383 spin_unlock(&session->lock);
375 return 0; 384 return 0;
376 } 385 }
386
377 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 387 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
378 BUG_ON(!rc); 388 BUG_ON(!rc);
379 389
380 r2t->exp_statsn = rhdr->statsn; 390 r2t->exp_statsn = rhdr->statsn;
381 r2t->data_length = be32_to_cpu(rhdr->data_length); 391 r2t->data_length = be32_to_cpu(rhdr->data_length);
382 if (r2t->data_length == 0 || 392 if (r2t->data_length == 0) {
383 r2t->data_length > session->max_burst) { 393 printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
384 spin_unlock(&session->lock); 394 spin_unlock(&session->lock);
385 return ISCSI_ERR_DATALEN; 395 return ISCSI_ERR_DATALEN;
386 } 396 }
387 397
398 if (r2t->data_length > session->max_burst)
399 debug_scsi("invalid R2T with data len %u and max burst %u."
400 "Attempting to execute request.\n",
401 r2t->data_length, session->max_burst);
402
388 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 403 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
389 if (r2t->data_offset + r2t->data_length > ctask->total_length) { 404 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
390 spin_unlock(&session->lock); 405 spin_unlock(&session->lock);
406 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
407 "offset %u and total length %d\n", r2t->data_length,
408 r2t->data_offset, ctask->total_length);
391 return ISCSI_ERR_DATALEN; 409 return ISCSI_ERR_DATALEN;
392 } 410 }
393 411
@@ -399,7 +417,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
399 tcp_ctask->exp_r2tsn = r2tsn + 1; 417 tcp_ctask->exp_r2tsn = r2tsn + 1;
400 tcp_ctask->xmstate |= XMSTATE_SOL_HDR; 418 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
401 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 419 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
402 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); 420 list_move_tail(&ctask->running, &conn->xmitqueue);
403 421
404 scsi_queue_work(session->host, &conn->xmitwork); 422 scsi_queue_work(session->host, &conn->xmitwork);
405 conn->r2t_pdus_cnt++; 423 conn->r2t_pdus_cnt++;
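
The connection's xmitqueue has become a list_head rather than a kfifo of pointers, so handing a command back to the xmit thread is now a list_move_tail() of its embedded 'running' node, which also unlinks it from whatever list it was on. A minimal sketch:

    #include <linux/list.h>

    struct pending_cmd { struct list_head running; };

    static void requeue(struct pending_cmd *cmd, struct list_head *xmitqueue)
    {
        /* unlinks from the current list, appends at the tail */
        list_move_tail(&cmd->running, xmitqueue);
    }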
@@ -450,7 +468,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
450 468
451 sg_init_one(&sg, (u8 *)hdr, 469 sg_init_one(&sg, (u8 *)hdr,
452 sizeof(struct iscsi_hdr) + ahslen); 470 sizeof(struct iscsi_hdr) + ahslen);
453 crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst); 471 crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
472 (u8 *)&cdgst);
454 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + 473 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
455 ahslen); 474 ahslen);
456 if (cdgst != rdgst) { 475 if (cdgst != rdgst) {
@@ -477,6 +496,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
477 case ISCSI_OP_SCSI_DATA_IN: 496 case ISCSI_OP_SCSI_DATA_IN:
478 tcp_conn->in.ctask = session->cmds[itt]; 497 tcp_conn->in.ctask = session->cmds[itt];
479 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); 498 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
499 if (rc)
500 return rc;
480 /* fall through */ 501 /* fall through */
481 case ISCSI_OP_SCSI_CMD_RSP: 502 case ISCSI_OP_SCSI_CMD_RSP:
482 tcp_conn->in.ctask = session->cmds[itt]; 503 tcp_conn->in.ctask = session->cmds[itt];
@@ -484,7 +505,6 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
484 goto copy_hdr; 505 goto copy_hdr;
485 506
486 spin_lock(&session->lock); 507 spin_lock(&session->lock);
487 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask);
488 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 508 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
489 spin_unlock(&session->lock); 509 spin_unlock(&session->lock);
490 break; 510 break;
@@ -500,13 +520,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
500 break; 520 break;
501 case ISCSI_OP_LOGIN_RSP: 521 case ISCSI_OP_LOGIN_RSP:
502 case ISCSI_OP_TEXT_RSP: 522 case ISCSI_OP_TEXT_RSP:
503 case ISCSI_OP_LOGOUT_RSP:
504 case ISCSI_OP_NOOP_IN:
505 case ISCSI_OP_REJECT: 523 case ISCSI_OP_REJECT:
506 case ISCSI_OP_ASYNC_EVENT: 524 case ISCSI_OP_ASYNC_EVENT:
525 /*
526 * It is possible that we could get a PDU with a buffer larger
527 * than 8K, but there are no targets that currently do this.
528 * For now we fail until we find a vendor that needs it
529 */
530 if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
531 tcp_conn->in.datalen) {
532 printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
533 "but conn buffer is only %u (opcode %0x)\n",
534 tcp_conn->in.datalen,
535 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
536 rc = ISCSI_ERR_PROTO;
537 break;
538 }
539
507 if (tcp_conn->in.datalen) 540 if (tcp_conn->in.datalen)
508 goto copy_hdr; 541 goto copy_hdr;
509 /* fall through */ 542 /* fall through */
543 case ISCSI_OP_LOGOUT_RSP:
544 case ISCSI_OP_NOOP_IN:
510 case ISCSI_OP_SCSI_TMFUNC_RSP: 545 case ISCSI_OP_SCSI_TMFUNC_RSP:
511 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 546 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
512 break; 547 break;
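
Control PDUs (login, text, reject, async event) are now bounded by the connection's receive buffer: anything larger than DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH is failed as a protocol error instead of overrunning the 8K buffer, and the logout and NOP-In opcodes are reshuffled below that check. A sketch of the guard; the 8K limit here is hard-coded rather than the driver's constant.

    #include <stdio.h>

    #define MAX_RECV_DLENGTH (8 * 1024)
    #define ERR_PROTO 4

    static int check_pdu_len(unsigned int datalen, unsigned int opcode)
    {
        if (datalen > MAX_RECV_DLENGTH) {
            fprintf(stderr, "PDU of %u bytes exceeds %u (opcode 0x%x)\n",
                    datalen, MAX_RECV_DLENGTH, opcode);
            return ERR_PROTO;
        }
        return 0;
    }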
@@ -523,7 +558,7 @@ copy_hdr:
523 * skbs to complete the command then we have to copy the header 558 * skbs to complete the command then we have to copy the header
524 * for later use 559 * for later use
525 */ 560 */
526 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < 561 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
527 (tcp_conn->in.datalen + tcp_conn->in.padding + 562 (tcp_conn->in.datalen + tcp_conn->in.padding +
528 (conn->datadgst_en ? 4 : 0))) { 563 (conn->datadgst_en ? 4 : 0))) {
529 debug_tcp("Copying header for later use. in.copy %d in.datalen" 564 debug_tcp("Copying header for later use. in.copy %d in.datalen"
@@ -614,10 +649,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
614 * byte counters. 649 * byte counters.
615 **/ 650 **/
616static inline int 651static inline int
617iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) 652iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
618{ 653{
619 void *buf = tcp_conn->data; 654 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
620 int buf_size = tcp_conn->in.datalen;
621 int buf_left = buf_size - tcp_conn->data_copied; 655 int buf_left = buf_size - tcp_conn->data_copied;
622 int size = min(tcp_conn->in.copy, buf_left); 656 int size = min(tcp_conn->in.copy, buf_left);
623 int rc; 657 int rc;
@@ -627,7 +661,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
627 BUG_ON(size <= 0); 661 BUG_ON(size <= 0);
628 662
629 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 663 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
630 (char*)buf + tcp_conn->data_copied, size); 664 (char*)conn->data + tcp_conn->data_copied, size);
631 BUG_ON(rc); 665 BUG_ON(rc);
632 666
633 tcp_conn->in.offset += size; 667 tcp_conn->in.offset += size;
@@ -642,15 +676,15 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
642} 676}
643 677
644static inline void 678static inline void
645partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, 679partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
646 struct scatterlist *sg, int offset, int length) 680 int offset, int length)
647{ 681{
648 struct scatterlist temp; 682 struct scatterlist temp;
649 683
650 memcpy(&temp, sg, sizeof(struct scatterlist)); 684 memcpy(&temp, sg, sizeof(struct scatterlist));
651 temp.offset = offset; 685 temp.offset = offset;
652 temp.length = length; 686 temp.length = length;
653 crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); 687 crypto_hash_update(desc, &temp, length);
654} 688}
655 689
656static void 690static void
@@ -659,7 +693,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
659 struct scatterlist tmp; 693 struct scatterlist tmp;
660 694
661 sg_init_one(&tmp, buf, len); 695 sg_init_one(&tmp, buf, len);
662 crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); 696 crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
663} 697}
664 698
665static int iscsi_scsi_data_in(struct iscsi_conn *conn) 699static int iscsi_scsi_data_in(struct iscsi_conn *conn)
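
partial_sg_digest_update() now takes the hash_desc directly, and the trick it uses is worth noting: it clones the scatterlist entry and narrows the clone's offset and length, so a sub-range can be hashed without disturbing the entry still in use for I/O. A sketch, assuming the pre-chaining scatterlist layout where .offset is page-relative and entries may be copied by value:

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    static void hash_sg_range(struct hash_desc *desc, struct scatterlist *sg,
                              unsigned int offset, unsigned int length)
    {
        struct scatterlist temp = *sg;  /* the original stays untouched */

        temp.offset = offset;           /* narrow to the window */
        temp.length = length;
        crypto_hash_update(desc, &temp, length);
    }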
@@ -713,11 +747,12 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
713 if (!rc) { 747 if (!rc) {
714 if (conn->datadgst_en) { 748 if (conn->datadgst_en) {
715 if (!offset) 749 if (!offset)
716 crypto_digest_update( 750 crypto_hash_update(
717 tcp_conn->data_rx_tfm, 751 &tcp_conn->rx_hash,
718 &sg[i], 1); 752 &sg[i], 1);
719 else 753 else
720 partial_sg_digest_update(tcp_conn, 754 partial_sg_digest_update(
755 &tcp_conn->rx_hash,
721 &sg[i], 756 &sg[i],
722 sg[i].offset + offset, 757 sg[i].offset + offset,
723 sg[i].length - offset); 758 sg[i].length - offset);
@@ -731,8 +766,10 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
731 /* 766 /*
732 * data-in is complete, but buffer not... 767 * data-in is complete, but buffer not...
733 */ 768 */
734 partial_sg_digest_update(tcp_conn, &sg[i], 769 partial_sg_digest_update(&tcp_conn->rx_hash,
735 sg[i].offset, sg[i].length-rc); 770 &sg[i],
771 sg[i].offset,
772 sg[i].length-rc);
736 rc = 0; 773 rc = 0;
737 break; 774 break;
738 } 775 }
@@ -745,10 +782,10 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
745done: 782done:
746 /* check for non-exceptional status */ 783 /* check for non-exceptional status */
747 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { 784 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
748 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 785 debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
749 (long)sc, sc->result, ctask->itt); 786 (long)sc, sc->result, ctask->itt,
787 tcp_conn->in.hdr->flags);
750 spin_lock(&conn->session->lock); 788 spin_lock(&conn->session->lock);
751 __iscsi_ctask_cleanup(conn, ctask);
752 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); 789 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
753 spin_unlock(&conn->session->lock); 790 spin_unlock(&conn->session->lock);
754 } 791 }
@@ -768,27 +805,23 @@ iscsi_data_recv(struct iscsi_conn *conn)
768 rc = iscsi_scsi_data_in(conn); 805 rc = iscsi_scsi_data_in(conn);
769 break; 806 break;
770 case ISCSI_OP_SCSI_CMD_RSP: 807 case ISCSI_OP_SCSI_CMD_RSP:
771 spin_lock(&conn->session->lock);
772 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask);
773 spin_unlock(&conn->session->lock);
774 case ISCSI_OP_TEXT_RSP: 808 case ISCSI_OP_TEXT_RSP:
775 case ISCSI_OP_LOGIN_RSP: 809 case ISCSI_OP_LOGIN_RSP:
776 case ISCSI_OP_NOOP_IN:
777 case ISCSI_OP_ASYNC_EVENT: 810 case ISCSI_OP_ASYNC_EVENT:
778 case ISCSI_OP_REJECT: 811 case ISCSI_OP_REJECT:
779 /* 812 /*
780 * Collect data segment to the connection's data 813 * Collect data segment to the connection's data
781 * placeholder 814 * placeholder
782 */ 815 */
783 if (iscsi_tcp_copy(tcp_conn)) { 816 if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
784 rc = -EAGAIN; 817 rc = -EAGAIN;
785 goto exit; 818 goto exit;
786 } 819 }
787 820
788 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, 821 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
789 tcp_conn->in.datalen); 822 tcp_conn->in.datalen);
790 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) 823 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
791 iscsi_recv_digest_update(tcp_conn, tcp_conn->data, 824 iscsi_recv_digest_update(tcp_conn, conn->data,
792 tcp_conn->in.datalen); 825 tcp_conn->in.datalen);
793 break; 826 break;
794 default: 827 default:
@@ -843,7 +876,7 @@ more:
843 if (rc == -EAGAIN) 876 if (rc == -EAGAIN)
844 goto nomore; 877 goto nomore;
845 else { 878 else {
846 iscsi_conn_failure(conn, rc); 879 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
847 return 0; 880 return 0;
848 } 881 }
849 } 882 }
@@ -853,10 +886,8 @@ more:
853 */ 886 */
854 rc = iscsi_tcp_hdr_recv(conn); 887 rc = iscsi_tcp_hdr_recv(conn);
855 if (!rc && tcp_conn->in.datalen) { 888 if (!rc && tcp_conn->in.datalen) {
856 if (conn->datadgst_en) { 889 if (conn->datadgst_en)
857 BUG_ON(!tcp_conn->data_rx_tfm); 890 crypto_hash_init(&tcp_conn->rx_hash);
858 crypto_digest_init(tcp_conn->data_rx_tfm);
859 }
860 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; 891 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
861 } else if (rc) { 892 } else if (rc) {
862 iscsi_conn_failure(conn, rc); 893 iscsi_conn_failure(conn, rc);
@@ -869,10 +900,15 @@ more:
869 900
870 debug_tcp("extra data_recv offset %d copy %d\n", 901 debug_tcp("extra data_recv offset %d copy %d\n",
871 tcp_conn->in.offset, tcp_conn->in.copy); 902 tcp_conn->in.offset, tcp_conn->in.copy);
872 skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 903 rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
873 &recv_digest, 4); 904 if (rc) {
874 tcp_conn->in.offset += 4; 905 if (rc == -EAGAIN)
875 tcp_conn->in.copy -= 4; 906 goto again;
907 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
908 return 0;
909 }
910
911 memcpy(&recv_digest, conn->data, sizeof(uint32_t));
876 if (recv_digest != tcp_conn->in.datadgst) { 912 if (recv_digest != tcp_conn->in.datadgst) {
877 debug_tcp("iscsi_tcp: data digest error!" 913 debug_tcp("iscsi_tcp: data digest error!"
878 "0x%x != 0x%x\n", recv_digest, 914 "0x%x != 0x%x\n", recv_digest,
@@ -897,7 +933,7 @@ more:
897 if (rc) { 933 if (rc) {
898 if (rc == -EAGAIN) 934 if (rc == -EAGAIN)
899 goto again; 935 goto again;
900 iscsi_conn_failure(conn, rc); 936 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
901 return 0; 937 return 0;
902 } 938 }
903 tcp_conn->in.copy -= tcp_conn->in.padding; 939 tcp_conn->in.copy -= tcp_conn->in.padding;
@@ -908,13 +944,14 @@ more:
908 tcp_conn->in.padding); 944 tcp_conn->in.padding);
909 memset(pad, 0, tcp_conn->in.padding); 945 memset(pad, 0, tcp_conn->in.padding);
910 sg_init_one(&sg, pad, tcp_conn->in.padding); 946 sg_init_one(&sg, pad, tcp_conn->in.padding);
911 crypto_digest_update(tcp_conn->data_rx_tfm, 947 crypto_hash_update(&tcp_conn->rx_hash,
912 &sg, 1); 948 &sg, sg.length);
913 } 949 }
914 crypto_digest_final(tcp_conn->data_rx_tfm, 950 crypto_hash_final(&tcp_conn->rx_hash,
915 (u8 *) & tcp_conn->in.datadgst); 951 (u8 *) &tcp_conn->in.datadgst);
916 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); 952 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
917 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; 953 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
954 tcp_conn->data_copied = 0;
918 } else 955 } else
919 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 956 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
920 } 957 }
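
Reading the received data digest now goes through iscsi_tcp_copy(), which accumulates into the connection buffer across socket reads and returns -EAGAIN until all four bytes have arrived; the old direct skb_copy_bits() assumed the digest never straddled an skb. A user-space sketch of that accumulate-until-complete pattern, with invented names (the real helper also advances in.offset/in.copy, and the patch adds the data_copied reset once a digest is consumed):

    #include <errno.h>
    #include <string.h>

    struct rx_state { char data[8]; int copied; };

    static int rx_copy(struct rx_state *rx, const char *seg, int avail,
                       int buf_size)
    {
        int left = buf_size - rx->copied;
        int size = avail < left ? avail : left;

        memcpy(rx->data + rx->copied, seg, size);
        rx->copied += size;
        if (rx->copied < buf_size)
            return -EAGAIN;     /* digest split across segments */
        rx->copied = 0;         /* complete; ready for the next value */
        return 0;
    }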
@@ -1028,9 +1065,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1028} 1065}
1029 1066
1030static void 1067static void
1031iscsi_conn_restore_callbacks(struct iscsi_conn *conn) 1068iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
1032{ 1069{
1033 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1034 struct sock *sk = tcp_conn->sock->sk; 1070 struct sock *sk = tcp_conn->sock->sk;
1035 1071
1036 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 1072 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -1155,37 +1191,12 @@ iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1155 1191
1156static inline void 1192static inline void
1157iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, 1193iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
1158 struct iscsi_cmd_task *ctask) 1194 struct iscsi_tcp_cmd_task *tcp_ctask)
1159{ 1195{
1160 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1196 crypto_hash_init(&tcp_conn->tx_hash);
1161
1162 BUG_ON(!tcp_conn->data_tx_tfm);
1163 crypto_digest_init(tcp_conn->data_tx_tfm);
1164 tcp_ctask->digest_count = 4; 1197 tcp_ctask->digest_count = 4;
1165} 1198}
1166 1199
1167static int
1168iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1169 struct iscsi_buf *buf, uint32_t *digest, int final)
1170{
1171 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1172 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1173 int rc = 0;
1174 int sent = 0;
1175
1176 if (final)
1177 crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest);
1178
1179 iscsi_buf_init_iov(buf, (char*)digest, 4);
1180 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1181 if (rc) {
1182 tcp_ctask->datadigest = *digest;
1183 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST;
1184 } else
1185 tcp_ctask->digest_count = 4;
1186 return rc;
1187}
1188
1189/** 1200/**
1190 * iscsi_solicit_data_cont - initialize next Data-Out 1201 * iscsi_solicit_data_cont - initialize next Data-Out
1191 * @conn: iscsi connection 1202 * @conn: iscsi connection
@@ -1203,7 +1214,6 @@ static void
1203iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1214iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1204 struct iscsi_r2t_info *r2t, int left) 1215 struct iscsi_r2t_info *r2t, int left)
1205{ 1216{
1206 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1207 struct iscsi_data *hdr; 1217 struct iscsi_data *hdr;
1208 struct scsi_cmnd *sc = ctask->sc; 1218 struct scsi_cmnd *sc = ctask->sc;
1209 int new_offset; 1219 int new_offset;
@@ -1232,27 +1242,30 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1232 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, 1242 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
1233 sizeof(struct iscsi_hdr)); 1243 sizeof(struct iscsi_hdr));
1234 1244
1235 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) { 1245 if (iscsi_buf_left(&r2t->sendbuf))
1236 BUG_ON(tcp_ctask->bad_sg == r2t->sg); 1246 return;
1247
1248 if (sc->use_sg) {
1237 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); 1249 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1238 r2t->sg += 1; 1250 r2t->sg += 1;
1239 } else 1251 } else {
1240 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1252 iscsi_buf_init_iov(&r2t->sendbuf,
1241 (char*)sc->request_buffer + new_offset, 1253 (char*)sc->request_buffer + new_offset,
1242 r2t->data_count); 1254 r2t->data_count);
1255 r2t->sg = NULL;
1256 }
1243} 1257}
1244 1258
1245static void 1259static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
1246iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1260 unsigned long len)
1247{ 1261{
1248 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1262 tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
1249 struct iscsi_data_task *dtask; 1263 if (!tcp_ctask->pad_count)
1264 return;
1250 1265
1251 dtask = tcp_ctask->dtask = &tcp_ctask->unsol_dtask; 1266 tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
1252 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr, 1267 debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
1253 tcp_ctask->r2t_data_count); 1268 tcp_ctask->xmstate |= XMSTATE_W_PAD;
1254 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1255 sizeof(struct iscsi_hdr));
1256} 1269}
1257 1270
1258/** 1271/**
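
Padding handling is factored into iscsi_set_padding(), which computes how many bytes are needed to reach the next ISCSI_PAD_LEN (4-byte) boundary and flags XMSTATE_W_PAD only when the count is nonzero. The arithmetic, runnable:

    #include <assert.h>

    #define ISCSI_PAD_LEN 4

    static unsigned int pad_bytes(unsigned long len)
    {
        unsigned int pad = len & (ISCSI_PAD_LEN - 1);

        return pad ? ISCSI_PAD_LEN - pad : 0;
    }

    int main(void)
    {
        assert(pad_bytes(0) == 0);
        assert(pad_bytes(1) == 3);
        assert(pad_bytes(4) == 0);
        assert(pad_bytes(6) == 2);
        return 0;
    }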
@@ -1280,38 +1293,20 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1280 if (sc->use_sg) { 1293 if (sc->use_sg) {
1281 struct scatterlist *sg = sc->request_buffer; 1294 struct scatterlist *sg = sc->request_buffer;
1282 1295
1283 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1296 iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
1284 &sg[tcp_ctask->sg_count++]); 1297 tcp_ctask->sg = sg + 1;
1285 tcp_ctask->sg = sg;
1286 tcp_ctask->bad_sg = sg + sc->use_sg; 1298 tcp_ctask->bad_sg = sg + sc->use_sg;
1287 } else 1299 } else {
1288 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1300 iscsi_buf_init_iov(&tcp_ctask->sendbuf,
1289 sc->request_buffer, 1301 sc->request_buffer,
1290 sc->request_bufflen); 1302 sc->request_bufflen);
1291 1303 tcp_ctask->sg = NULL;
1292 if (ctask->imm_count) 1304 tcp_ctask->bad_sg = NULL;
1293 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1294
1295 tcp_ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1296 if (tcp_ctask->pad_count) {
1297 tcp_ctask->pad_count = ISCSI_PAD_LEN -
1298 tcp_ctask->pad_count;
1299 debug_scsi("write padding %d bytes\n",
1300 tcp_ctask->pad_count);
1301 tcp_ctask->xmstate |= XMSTATE_W_PAD;
1302 } 1305 }
1303 1306 debug_scsi("cmd [itt 0x%x total %d imm_data %d "
1304 if (ctask->unsol_count) 1307 "unsol count %d, unsol offset %d]\n",
1305 tcp_ctask->xmstate |= XMSTATE_UNS_HDR |
1306 XMSTATE_UNS_INIT;
1307 tcp_ctask->r2t_data_count = ctask->total_length -
1308 ctask->imm_count -
1309 ctask->unsol_count;
1310
1311 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1312 "r2t_data %d]\n",
1313 ctask->itt, ctask->total_length, ctask->imm_count, 1308 ctask->itt, ctask->total_length, ctask->imm_count,
1314 ctask->unsol_count, tcp_ctask->r2t_data_count); 1309 ctask->unsol_count, ctask->unsol_offset);
1315 } else 1310 } else
1316 tcp_ctask->xmstate = XMSTATE_R_HDR; 1311 tcp_ctask->xmstate = XMSTATE_R_HDR;
1317 1312
@@ -1393,8 +1388,8 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1393} 1388}
1394 1389
1395static inline int 1390static inline int
1396handle_xmstate_r_hdr(struct iscsi_conn *conn, 1391iscsi_send_read_hdr(struct iscsi_conn *conn,
1397 struct iscsi_tcp_cmd_task *tcp_ctask) 1392 struct iscsi_tcp_cmd_task *tcp_ctask)
1398{ 1393{
1399 int rc; 1394 int rc;
1400 1395
@@ -1412,7 +1407,7 @@ handle_xmstate_r_hdr(struct iscsi_conn *conn,
1412} 1407}
1413 1408
1414static inline int 1409static inline int
1415handle_xmstate_w_hdr(struct iscsi_conn *conn, 1410iscsi_send_write_hdr(struct iscsi_conn *conn,
1416 struct iscsi_cmd_task *ctask) 1411 struct iscsi_cmd_task *ctask)
1417{ 1412{
1418 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1413 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
@@ -1423,85 +1418,126 @@ handle_xmstate_w_hdr(struct iscsi_conn *conn,
1423 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1418 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1424 (u8*)tcp_ctask->hdrext); 1419 (u8*)tcp_ctask->hdrext);
1425 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); 1420 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
1426 if (rc) 1421 if (rc) {
1427 tcp_ctask->xmstate |= XMSTATE_W_HDR; 1422 tcp_ctask->xmstate |= XMSTATE_W_HDR;
1428 return rc; 1423 return rc;
1424 }
1425
1426 if (ctask->imm_count) {
1427 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1428 iscsi_set_padding(tcp_ctask, ctask->imm_count);
1429
1430 if (ctask->conn->datadgst_en) {
1431 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1432 tcp_ctask->immdigest = 0;
1433 }
1434 }
1435
1436 if (ctask->unsol_count)
1437 tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1438 return 0;
1429} 1439}
1430 1440
1431static inline int 1441static int
1432handle_xmstate_data_digest(struct iscsi_conn *conn, 1442iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1433 struct iscsi_cmd_task *ctask)
1434{ 1443{
1435 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1444 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1436 int rc; 1445 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1446 int sent = 0, rc;
1437 1447
1438 tcp_ctask->xmstate &= ~XMSTATE_DATA_DIGEST; 1448 if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
1439 debug_tcp("resent data digest 0x%x\n", tcp_ctask->datadigest); 1449 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
1440 rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, 1450 tcp_ctask->pad_count);
1441 &tcp_ctask->datadigest, 0); 1451 if (conn->datadgst_en)
1452 crypto_hash_update(&tcp_conn->tx_hash,
1453 &tcp_ctask->sendbuf.sg,
1454 tcp_ctask->sendbuf.sg.length);
1455 } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
1456 return 0;
1457
1458 tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
1459 tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
1460 debug_scsi("sending %d pad bytes for itt 0x%x\n",
1461 tcp_ctask->pad_count, ctask->itt);
1462 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
1463 &sent);
1442 if (rc) { 1464 if (rc) {
1443 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST; 1465 debug_scsi("padding send failed %d\n", rc);
1444 debug_tcp("resent data digest 0x%x fail!\n", 1466 tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
1445 tcp_ctask->datadigest);
1446 } 1467 }
1447
1448 return rc; 1468 return rc;
1449} 1469}
1450 1470
1451static inline int 1471static int
1452handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1472iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1473 struct iscsi_buf *buf, uint32_t *digest)
1453{ 1474{
1454 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1475 struct iscsi_tcp_cmd_task *tcp_ctask;
1455 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1476 struct iscsi_tcp_conn *tcp_conn;
1456 int rc; 1477 int rc, sent = 0;
1457 1478
1458 BUG_ON(!ctask->imm_count); 1479 if (!conn->datadgst_en)
1459 tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; 1480 return 0;
1460 1481
1461 if (conn->datadgst_en) { 1482 tcp_ctask = ctask->dd_data;
1462 iscsi_data_digest_init(tcp_conn, ctask); 1483 tcp_conn = conn->dd_data;
1463 tcp_ctask->immdigest = 0;
1464 }
1465 1484
1466 for (;;) { 1485 if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
1467 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, 1486 crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
1468 &ctask->imm_count, &tcp_ctask->sent); 1487 iscsi_buf_init_iov(buf, (char*)digest, 4);
1469 if (rc) { 1488 }
1470 tcp_ctask->xmstate |= XMSTATE_IMM_DATA; 1489 tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
1471 if (conn->datadgst_en) {
1472 crypto_digest_final(tcp_conn->data_tx_tfm,
1473 (u8*)&tcp_ctask->immdigest);
1474 debug_tcp("tx imm sendpage fail 0x%x\n",
1475 tcp_ctask->datadigest);
1476 }
1477 return rc;
1478 }
1479 if (conn->datadgst_en)
1480 crypto_digest_update(tcp_conn->data_tx_tfm,
1481 &tcp_ctask->sendbuf.sg, 1);
1482 1490
1483 if (!ctask->imm_count) 1491 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1484 break; 1492 if (!rc)
1485 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1493 debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
1486 &tcp_ctask->sg[tcp_ctask->sg_count++]); 1494 ctask->itt);
1495 else {
1496 debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
1497 *digest, ctask->itt);
1498 tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
1487 } 1499 }
1500 return rc;
1501}
1488 1502
1489 if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) { 1503static int
1490 rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, 1504iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
1491 &tcp_ctask->immdigest, 1); 1505 struct scatterlist **sg, int *sent, int *count,
1492 if (rc) { 1506 struct iscsi_buf *digestbuf, uint32_t *digest)
1493 debug_tcp("sending imm digest 0x%x fail!\n", 1507{
1494 tcp_ctask->immdigest); 1508 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1495 return rc; 1509 struct iscsi_conn *conn = ctask->conn;
1510 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1511 int rc, buf_sent, offset;
1512
1513 while (*count) {
1514 buf_sent = 0;
1515 offset = sendbuf->sent;
1516
1517 rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
1518 *sent = *sent + buf_sent;
1519 if (buf_sent && conn->datadgst_en)
1520 partial_sg_digest_update(&tcp_conn->tx_hash,
1521 &sendbuf->sg, sendbuf->sg.offset + offset,
1522 buf_sent);
1523 if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
1524 iscsi_buf_init_sg(sendbuf, *sg);
1525 *sg = *sg + 1;
1496 } 1526 }
1497 debug_tcp("sending imm digest 0x%x\n", tcp_ctask->immdigest); 1527
1528 if (rc)
1529 return rc;
1498 } 1530 }
1499 1531
1500 return 0; 1532 rc = iscsi_send_padding(conn, ctask);
1533 if (rc)
1534 return rc;
1535
1536 return iscsi_send_digest(conn, ctask, digestbuf, digest);
1501} 1537}
1502 1538
1503static inline int 1539static int
1504handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1540iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1505{ 1541{
1506 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1542 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1507 struct iscsi_data_task *dtask; 1543 struct iscsi_data_task *dtask;
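
The big hunk above collapses the per-xmstate handlers into a small set of helpers: iscsi_send_data() walks the scatterlist and folds exactly the bytes that went out into the running digest, then iscsi_send_padding() and iscsi_send_digest() finish the PDU, each using an XMSTATE_W_RESEND_* bit so a short send resumes with the already-computed bytes instead of re-finalizing the hash. A runnable sketch of that resend discipline, all names invented:

    #include <stdint.h>
    #include <stdio.h>

    #define RESEND_DIGEST 0x1

    struct xmit_task {
        unsigned int xmstate;
        uint32_t digest;        /* finalized CRC awaiting (re)send */
    };

    static int flaky_xmit(const void *buf, int len)
    {
        static int calls;
        (void)buf;
        return calls++ ? len : -1;      /* fail once, then succeed */
    }

    static int send_digest(struct xmit_task *t,
                           int (*xmit)(const void *, int))
    {
        if (!(t->xmstate & RESEND_DIGEST))
            t->digest = 0xdeadbeef;     /* stands in for crypto_hash_final() */
        t->xmstate &= ~RESEND_DIGEST;

        if (xmit(&t->digest, sizeof(t->digest)) < 0) {
            t->xmstate |= RESEND_DIGEST; /* resend the same bytes later */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct xmit_task t = { 0, 0 };

        while (send_digest(&t, flaky_xmit) < 0)
            ;
        printf("digest 0x%x sent\n", (unsigned)t.digest);
        return 0;
    }

Finalizing the hash exactly once matters because crypto_hash_final() consumes the running state; retrying it after a partial send would produce a different (wrong) digest.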
@@ -1509,12 +1545,17 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1509 1545
1510 tcp_ctask->xmstate |= XMSTATE_UNS_DATA; 1546 tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
1511 if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { 1547 if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
1512 iscsi_unsolicit_data_init(conn, ctask); 1548 dtask = &tcp_ctask->unsol_dtask;
1513 dtask = tcp_ctask->dtask; 1549
1550 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
1551 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1552 sizeof(struct iscsi_hdr));
1514 if (conn->hdrdgst_en) 1553 if (conn->hdrdgst_en)
1515 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1554 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1516 (u8*)dtask->hdrext); 1555 (u8*)dtask->hdrext);
1556
1517 tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; 1557 tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
1558 iscsi_set_padding(tcp_ctask, ctask->data_count);
1518 } 1559 }
1519 1560
1520 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); 1561 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
@@ -1524,254 +1565,138 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1524 return rc; 1565 return rc;
1525 } 1566 }
1526 1567
1568 if (conn->datadgst_en) {
1569 dtask = &tcp_ctask->unsol_dtask;
1570 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1571 dtask->digest = 0;
1572 }
1573
1527 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", 1574 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1528 ctask->itt, ctask->unsol_count, tcp_ctask->sent); 1575 ctask->itt, ctask->unsol_count, tcp_ctask->sent);
1529 return 0; 1576 return 0;
1530} 1577}
1531 1578
1532static inline int 1579static int
1533handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1580iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1534{ 1581{
1535 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1582 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1536 struct iscsi_data_task *dtask = tcp_ctask->dtask;
1537 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1538 int rc; 1583 int rc;
1539 1584
1540 BUG_ON(!ctask->data_count); 1585 if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
1541 tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; 1586 BUG_ON(!ctask->unsol_count);
1542 1587 tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
1543 if (conn->datadgst_en) { 1588send_hdr:
1544 iscsi_data_digest_init(tcp_conn, ctask); 1589 rc = iscsi_send_unsol_hdr(conn, ctask);
1545 dtask->digest = 0; 1590 if (rc)
1591 return rc;
1546 } 1592 }
1547 1593
1548 for (;;) { 1594 if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
1595 struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
1549 int start = tcp_ctask->sent; 1596 int start = tcp_ctask->sent;
1550 1597
1551 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, 1598 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
1552 &ctask->data_count, &tcp_ctask->sent); 1599 &tcp_ctask->sent, &ctask->data_count,
1553 if (rc) { 1600 &dtask->digestbuf, &dtask->digest);
1554 ctask->unsol_count -= tcp_ctask->sent - start;
1555 tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
1556 /* will continue with this ctask later.. */
1557 if (conn->datadgst_en) {
1558 crypto_digest_final(tcp_conn->data_tx_tfm,
1559 (u8 *)&dtask->digest);
1560 debug_tcp("tx uns data fail 0x%x\n",
1561 dtask->digest);
1562 }
1563 return rc;
1564 }
1565
1566 BUG_ON(tcp_ctask->sent > ctask->total_length);
1567 ctask->unsol_count -= tcp_ctask->sent - start; 1601 ctask->unsol_count -= tcp_ctask->sent - start;
1568 1602 if (rc)
1603 return rc;
1604 tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
1569 /* 1605 /*
1570 * XXX: we may get here with an uninitialized sendbuf, 1606 * Done with the Data-Out. Next, check if we need
1571 * so pass it on 1607 * to send another unsolicited Data-Out.
1572 */ 1608 */
1573 if (conn->datadgst_en && tcp_ctask->sent - start > 0) 1609 if (ctask->unsol_count) {
1574 crypto_digest_update(tcp_conn->data_tx_tfm, 1610 debug_scsi("sending more uns\n");
1575 &tcp_ctask->sendbuf.sg, 1); 1611 tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
1576 1612 goto send_hdr;
1577 if (!ctask->data_count)
1578 break;
1579 iscsi_buf_init_sg(&tcp_ctask->sendbuf,
1580 &tcp_ctask->sg[tcp_ctask->sg_count++]);
1581 }
1582 BUG_ON(ctask->unsol_count < 0);
1583
1584 /*
1585 * Done with the Data-Out. Next, check if we need
1586 * to send another unsolicited Data-Out.
1587 */
1588 if (ctask->unsol_count) {
1589 if (conn->datadgst_en) {
1590 rc = iscsi_digest_final_send(conn, ctask,
1591 &dtask->digestbuf,
1592 &dtask->digest, 1);
1593 if (rc) {
1594 debug_tcp("send uns digest 0x%x fail\n",
1595 dtask->digest);
1596 return rc;
1597 }
1598 debug_tcp("sending uns digest 0x%x, more uns\n",
1599 dtask->digest);
1600 } 1613 }
1601 tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
1602 return 1;
1603 } 1614 }
1604
1605 if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) {
1606 rc = iscsi_digest_final_send(conn, ctask,
1607 &dtask->digestbuf,
1608 &dtask->digest, 1);
1609 if (rc) {
1610 debug_tcp("send last uns digest 0x%x fail\n",
1611 dtask->digest);
1612 return rc;
1613 }
1614 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
1615 }
1616
1617 return 0; 1615 return 0;
1618} 1616}
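iscsi_send_unsol_pdu() now owns the whole unsolicited burst: it decrements unsol_count by the bytes each Data-Out moved and jumps back to send_hdr until the burst is drained. A rough stand-alone illustration of that accounting (the names and the fixed PDU size here are made up):

/* Illustrative only: draining an unsolicited burst PDU by PDU,
 * as iscsi_send_unsol_pdu() does with unsol_count/data_count. */
#include <stdio.h>

int main(void)
{
	int unsol_count = 22;	/* bytes left in the burst */
	const int max_pdu = 8;	/* per-PDU payload limit */

	while (unsol_count) {
		int data_count = unsol_count < max_pdu ? unsol_count : max_pdu;

		printf("Data-Out hdr, dlen %d\n", data_count);
		unsol_count -= data_count;	/* "sent - start" in the driver */
		if (unsol_count)
			printf("sending more uns\n");
	}
	return 0;
}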
1619 1617
1620static inline int 1618static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
1621handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1619 struct iscsi_cmd_task *ctask)
1622{ 1620{
1623 struct iscsi_session *session = conn->session;
1624 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1625 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1621 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1626 struct iscsi_r2t_info *r2t = tcp_ctask->r2t; 1622 struct iscsi_session *session = conn->session;
1627 struct iscsi_data_task *dtask = &r2t->dtask; 1623 struct iscsi_r2t_info *r2t;
1624 struct iscsi_data_task *dtask;
1628 int left, rc; 1625 int left, rc;
1629 1626
1630 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; 1627 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
1631 tcp_ctask->dtask = dtask; 1628 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1632
1633 if (conn->datadgst_en) {
1634 iscsi_data_digest_init(tcp_conn, ctask);
1635 dtask->digest = 0;
1636 }
1637solicit_again:
1638 /*
1639 * send Data-Out within this R2T sequence.
1640 */
1641 if (!r2t->data_count)
1642 goto data_out_done;
1643
1644 rc = iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent);
1645 if (rc) {
1646 tcp_ctask->xmstate |= XMSTATE_SOL_DATA; 1629 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1647 /* will continue with this ctask later.. */ 1630 if (!tcp_ctask->r2t)
1648 if (conn->datadgst_en) { 1631 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
1649 crypto_digest_final(tcp_conn->data_tx_tfm, 1632 sizeof(void*));
1650 (u8 *)&dtask->digest); 1633send_hdr:
1651 debug_tcp("r2t data send fail 0x%x\n", dtask->digest); 1634 r2t = tcp_ctask->r2t;
1652 } 1635 dtask = &r2t->dtask;
1653 return rc;
1654 }
1655 1636
1656 BUG_ON(r2t->data_count < 0); 1637 if (conn->hdrdgst_en)
1657 if (conn->datadgst_en) 1638 iscsi_hdr_digest(conn, &r2t->headbuf,
1658 crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg, 1639 (u8*)dtask->hdrext);
1659 1); 1640 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
1660 1641 if (rc) {
1661 if (r2t->data_count) { 1642 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1662 BUG_ON(ctask->sc->use_sg == 0); 1643 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
1663 if (!iscsi_buf_left(&r2t->sendbuf)) { 1644 return rc;
1664 BUG_ON(tcp_ctask->bad_sg == r2t->sg);
1665 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1666 r2t->sg += 1;
1667 } 1645 }
1668 goto solicit_again;
1669 }
1670 1646
1671data_out_done:
1672 /*
1673 * Done with this Data-Out. Next, check if we have
1674 * to send another Data-Out for this R2T.
1675 */
1676 BUG_ON(r2t->data_length - r2t->sent < 0);
1677 left = r2t->data_length - r2t->sent;
1678 if (left) {
1679 if (conn->datadgst_en) { 1647 if (conn->datadgst_en) {
1680 rc = iscsi_digest_final_send(conn, ctask, 1648 iscsi_data_digest_init(conn->dd_data, tcp_ctask);
1681 &dtask->digestbuf, 1649 dtask->digest = 0;
1682 &dtask->digest, 1);
1683 if (rc) {
1684 debug_tcp("send r2t data digest 0x%x"
1685 "fail\n", dtask->digest);
1686 return rc;
1687 }
1688 debug_tcp("r2t data send digest 0x%x\n",
1689 dtask->digest);
1690 } 1650 }
1691 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1692 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1693 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1694 return 1;
1695 }
1696
1697 /*
1698 * Done with this R2T. Check if there are more
1699 * outstanding R2Ts ready to be processed.
1700 */
1701 BUG_ON(tcp_ctask->r2t_data_count - r2t->data_length < 0);
1702 if (conn->datadgst_en) {
1703 rc = iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
1704 &dtask->digest, 1);
1705 if (rc) {
1706 debug_tcp("send last r2t data digest 0x%x"
1707 "fail\n", dtask->digest);
1708 return rc;
1709 }
1710 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
1711 }
1712 1651
1713 tcp_ctask->r2t_data_count -= r2t->data_length; 1652 iscsi_set_padding(tcp_ctask, r2t->data_count);
1714 tcp_ctask->r2t = NULL; 1653 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
1715 spin_lock_bh(&session->lock); 1654 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
1716 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 1655 r2t->sent);
1717 spin_unlock_bh(&session->lock);
1718 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
1719 tcp_ctask->r2t = r2t;
1720 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1721 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1722 return 1;
1723 } 1656 }
1724 1657
1725 return 0; 1658 if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
1726} 1659 r2t = tcp_ctask->r2t;
1660 dtask = &r2t->dtask;
1727 1661
1728static inline int 1662 rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
1729handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1663 &r2t->sent, &r2t->data_count,
1730{ 1664 &dtask->digestbuf, &dtask->digest);
1731 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1665 if (rc)
1732 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1666 return rc;
1733 struct iscsi_data_task *dtask = tcp_ctask->dtask; 1667 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1734 int sent, rc;
1735 1668
1736 tcp_ctask->xmstate &= ~XMSTATE_W_PAD; 1669 /*
1737 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, 1670 * Done with this Data-Out. Next, check if we have
1738 tcp_ctask->pad_count); 1671 * to send another Data-Out for this R2T.
1739 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, 1672 */
1740 &sent); 1673 BUG_ON(r2t->data_length - r2t->sent < 0);
1741 if (rc) { 1674 left = r2t->data_length - r2t->sent;
1742 tcp_ctask->xmstate |= XMSTATE_W_PAD; 1675 if (left) {
1743 return rc; 1676 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1744 } 1677 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1678 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1679 goto send_hdr;
1680 }
1745 1681
1746 if (conn->datadgst_en) { 1682 /*
1747 crypto_digest_update(tcp_conn->data_tx_tfm, 1683 * Done with this R2T. Check if there are more
1748 &tcp_ctask->sendbuf.sg, 1); 1684 * outstanding R2Ts ready to be processed.
1749 /* imm data? */ 1685 */
1750 if (!dtask) { 1686 spin_lock_bh(&session->lock);
1751 rc = iscsi_digest_final_send(conn, ctask, 1687 tcp_ctask->r2t = NULL;
1752 &tcp_ctask->immbuf, 1688 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
1753 &tcp_ctask->immdigest, 1); 1689 sizeof(void*));
1754 if (rc) { 1690 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
1755 debug_tcp("send padding digest 0x%x" 1691 sizeof(void*))) {
1756 "fail!\n", tcp_ctask->immdigest); 1692 tcp_ctask->r2t = r2t;
1757 return rc; 1693 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1758 } 1694 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1759 debug_tcp("done with padding, digest 0x%x\n", 1695 spin_unlock_bh(&session->lock);
1760 tcp_ctask->datadigest); 1696 goto send_hdr;
1761 } else {
1762 rc = iscsi_digest_final_send(conn, ctask,
1763 &dtask->digestbuf,
1764 &dtask->digest, 1);
1765 if (rc) {
1766 debug_tcp("send padding digest 0x%x"
1767 "fail\n", dtask->digest);
1768 return rc;
1769 }
1770 debug_tcp("done with padding, digest 0x%x\n",
1771 dtask->digest);
1772 } 1697 }
1698 spin_unlock_bh(&session->lock);
1773 } 1699 }
1774
1775 return 0; 1700 return 0;
1776} 1701}
1777 1702
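On R2T completion the descriptor is recycled under session->lock: it goes back to r2tpool.queue and, if r2tqueue holds another outstanding R2T, the send loop restarts at send_hdr with the new descriptor. A toy user-space analogue of cycling fixed descriptors through a pointer ring (no locking shown, all names hypothetical):

/* Illustrative only: a tiny pointer ring standing in for the
 * kfifo-of-r2t-pointers pattern used above. */
#include <stdio.h>

struct r2t { int id; };

static struct r2t *ring[4];
static unsigned int head, tail;

static void ring_put(struct r2t *r) { ring[head++ % 4] = r; }
static struct r2t *ring_get(void)
{
	return (tail == head) ? NULL : ring[tail++ % 4];
}

int main(void)
{
	struct r2t pool[2] = { { 1 }, { 2 } };
	struct r2t *r;

	ring_put(&pool[0]);
	ring_put(&pool[1]);
	while ((r = ring_get()) != NULL) {
		printf("processing r2t %d\n", r->id);
		/* the driver would __kfifo_put() it back to the pool here */
	}
	return 0;
}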
@@ -1791,85 +1716,30 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1791 return rc; 1716 return rc;
1792 1717
1793 if (tcp_ctask->xmstate & XMSTATE_R_HDR) 1718 if (tcp_ctask->xmstate & XMSTATE_R_HDR)
1794 return handle_xmstate_r_hdr(conn, tcp_ctask); 1719 return iscsi_send_read_hdr(conn, tcp_ctask);
1795 1720
1796 if (tcp_ctask->xmstate & XMSTATE_W_HDR) { 1721 if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
1797 rc = handle_xmstate_w_hdr(conn, ctask); 1722 rc = iscsi_send_write_hdr(conn, ctask);
1798 if (rc)
1799 return rc;
1800 }
1801
1802 /* XXX: for data digest xmit recover */
1803 if (tcp_ctask->xmstate & XMSTATE_DATA_DIGEST) {
1804 rc = handle_xmstate_data_digest(conn, ctask);
1805 if (rc) 1723 if (rc)
1806 return rc; 1724 return rc;
1807 } 1725 }
1808 1726
1809 if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { 1727 if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
1810 rc = handle_xmstate_imm_data(conn, ctask); 1728 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
1729 &tcp_ctask->sent, &ctask->imm_count,
1730 &tcp_ctask->immbuf, &tcp_ctask->immdigest);
1811 if (rc) 1731 if (rc)
1812 return rc; 1732 return rc;
1733 tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
1813 } 1734 }
1814 1735
1815 if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { 1736 rc = iscsi_send_unsol_pdu(conn, ctask);
1816 BUG_ON(!ctask->unsol_count); 1737 if (rc)
1817 tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; 1738 return rc;
1818unsolicit_head_again:
1819 rc = handle_xmstate_uns_hdr(conn, ctask);
1820 if (rc)
1821 return rc;
1822 }
1823
1824 if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
1825 rc = handle_xmstate_uns_data(conn, ctask);
1826 if (rc == 1)
1827 goto unsolicit_head_again;
1828 else if (rc)
1829 return rc;
1830 goto done;
1831 }
1832
1833 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
1834 struct iscsi_r2t_info *r2t;
1835
1836 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1837 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1838 if (!tcp_ctask->r2t)
1839 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
1840 sizeof(void*));
1841solicit_head_again:
1842 r2t = tcp_ctask->r2t;
1843 if (conn->hdrdgst_en)
1844 iscsi_hdr_digest(conn, &r2t->headbuf,
1845 (u8*)r2t->dtask.hdrext);
1846 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
1847 if (rc) {
1848 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1849 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
1850 return rc;
1851 }
1852
1853 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
1854 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
1855 r2t->sent);
1856 }
1857
1858 if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
1859 rc = handle_xmstate_sol_data(conn, ctask);
1860 if (rc == 1)
1861 goto solicit_head_again;
1862 if (rc)
1863 return rc;
1864 }
1865 1739
1866done: 1740 rc = iscsi_send_sol_pdu(conn, ctask);
1867 /* 1741 if (rc)
1868 * Last thing to check is whether we need to send write 1742 return rc;
1869 * padding. Note that we check for xmstate equality, not just the bit.
1870 */
1871 if (tcp_ctask->xmstate == XMSTATE_W_PAD)
1872 rc = handle_xmstate_w_pad(conn, ctask);
1873 1743
1874 return rc; 1744 return rc;
1875} 1745}
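The transmit entry point is now a fixed-order pipeline: read or write header, immediate data, unsolicited PDUs, solicited PDUs. Any stage may return a non-zero rc, and because a stage only clears its xmstate bit on success, the next invocation resumes exactly where it stopped. A self-contained miniature of that bit-driven resume, for illustration (the stage names and the byte budget are invented):

/* Illustrative only: a stage pipeline where a failed stage keeps
 * its state bit set so a later call resumes exactly there. */
#include <stdio.h>

#define ST_HDR  0x1
#define ST_DATA 0x2

static int send_stage(const char *name, int *budget)
{
	if (*budget == 0)
		return -1;	/* would be a send error in the driver */
	(*budget)--;
	printf("stage %s done\n", name);
	return 0;
}

static int xmit(unsigned int *xmstate, int *budget)
{
	if (*xmstate & ST_HDR) {
		if (send_stage("hdr", budget))
			return -1;
		*xmstate &= ~ST_HDR;	/* clear only on success */
	}
	if (*xmstate & ST_DATA) {
		if (send_stage("data", budget))
			return -1;
		*xmstate &= ~ST_DATA;
	}
	return 0;
}

int main(void)
{
	unsigned int xmstate = ST_HDR | ST_DATA;
	int budget = 1;		/* first call: only the header fits */

	while (xmit(&xmstate, &budget))
		budget = 1;	/* "requeue": retry with a fresh budget */
	return 0;
}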
@@ -1900,20 +1770,24 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1900 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 1770 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1901 /* initial operational parameters */ 1771 /* initial operational parameters */
1902 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1772 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1903 tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
1904 1773
1905 /* allocate initial PDU receive place holder */ 1774 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1906 if (tcp_conn->data_size <= PAGE_SIZE) 1775 CRYPTO_ALG_ASYNC);
1907 tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); 1776 tcp_conn->tx_hash.flags = 0;
1908 else 1777 if (!tcp_conn->tx_hash.tfm)
1909 tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, 1778 goto free_tcp_conn;
1910 get_order(tcp_conn->data_size)); 1779
1911 if (!tcp_conn->data) 1780 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1912 goto max_recv_dlenght_alloc_fail; 1781 CRYPTO_ALG_ASYNC);
1782 tcp_conn->rx_hash.flags = 0;
1783 if (!tcp_conn->rx_hash.tfm)
1784 goto free_tx_tfm;
1913 1785
1914 return cls_conn; 1786 return cls_conn;
1915 1787
1916max_recv_dlenght_alloc_fail: 1788free_tx_tfm:
1789 crypto_free_hash(tcp_conn->tx_hash.tfm);
1790free_tcp_conn:
1917 kfree(tcp_conn); 1791 kfree(tcp_conn);
1918tcp_conn_alloc_fail: 1792tcp_conn_alloc_fail:
1919 iscsi_conn_teardown(cls_conn); 1793 iscsi_conn_teardown(cls_conn);
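The connection now allocates its CRC32C transforms once at create time through the crypto hash interface (struct hash_desc wrapping a crypto_hash) instead of lazily from set_param. A kernel-style sketch of the same allocate-and-unwind pattern, not a standalone module; note that crypto_alloc_hash() reports failure via ERR_PTR() rather than NULL, so IS_ERR() is the stricter test (the hunk above checks for NULL):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Sketch only: allocate tx/rx CRC32C digests, unwinding on failure. */
static int alloc_crc32c_pair(struct hash_desc *tx, struct hash_desc *rx)
{
	tx->tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	tx->flags = 0;
	if (IS_ERR(tx->tfm))
		return PTR_ERR(tx->tfm);

	rx->tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	rx->flags = 0;
	if (IS_ERR(rx->tfm)) {
		crypto_free_hash(tx->tfm);	/* unwind in reverse order */
		return PTR_ERR(rx->tfm);
	}
	return 0;
}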
@@ -1921,6 +1795,23 @@ tcp_conn_alloc_fail:
1921} 1795}
1922 1796
1923static void 1797static void
1798iscsi_tcp_release_conn(struct iscsi_conn *conn)
1799{
1800 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1801
1802 if (!tcp_conn->sock)
1803 return;
1804
1805 sock_hold(tcp_conn->sock->sk);
1806 iscsi_conn_restore_callbacks(tcp_conn);
1807 sock_put(tcp_conn->sock->sk);
1808
1809 sock_release(tcp_conn->sock);
1810 tcp_conn->sock = NULL;
1811 conn->recv_lock = NULL;
1812}
1813
1814static void
1924iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) 1815iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1925{ 1816{
1926 struct iscsi_conn *conn = cls_conn->dd_data; 1817 struct iscsi_conn *conn = cls_conn->dd_data;
@@ -1930,29 +1821,31 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1930 if (conn->hdrdgst_en || conn->datadgst_en) 1821 if (conn->hdrdgst_en || conn->datadgst_en)
1931 digest = 1; 1822 digest = 1;
1932 1823
1824 iscsi_tcp_release_conn(conn);
1933 iscsi_conn_teardown(cls_conn); 1825 iscsi_conn_teardown(cls_conn);
1934 1826
1935 /* now free tcp_conn */ 1827 /* now free tcp_conn */
1936 if (digest) { 1828 if (digest) {
1937 if (tcp_conn->tx_tfm) 1829 if (tcp_conn->tx_hash.tfm)
1938 crypto_free_tfm(tcp_conn->tx_tfm); 1830 crypto_free_hash(tcp_conn->tx_hash.tfm);
1939 if (tcp_conn->rx_tfm) 1831 if (tcp_conn->rx_hash.tfm)
1940 crypto_free_tfm(tcp_conn->rx_tfm); 1832 crypto_free_hash(tcp_conn->rx_hash.tfm);
1941 if (tcp_conn->data_tx_tfm)
1942 crypto_free_tfm(tcp_conn->data_tx_tfm);
1943 if (tcp_conn->data_rx_tfm)
1944 crypto_free_tfm(tcp_conn->data_rx_tfm);
1945 } 1833 }
1946 1834
1947 /* free conn->data, size = MaxRecvDataSegmentLength */
1948 if (tcp_conn->data_size <= PAGE_SIZE)
1949 kfree(tcp_conn->data);
1950 else
1951 free_pages((unsigned long)tcp_conn->data,
1952 get_order(tcp_conn->data_size));
1953 kfree(tcp_conn); 1835 kfree(tcp_conn);
1954} 1836}
1955 1837
1838static void
1839iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1840{
1841 struct iscsi_conn *conn = cls_conn->dd_data;
1842 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1843
1844 iscsi_conn_stop(cls_conn, flag);
1845 iscsi_tcp_release_conn(conn);
1846 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1847}
1848
1956static int 1849static int
1957iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 1850iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1958 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1851 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -2001,52 +1894,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
2001 return 0; 1894 return 0;
2002} 1895}
2003 1896
2004static void
2005iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2006{
2007 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2008 struct iscsi_r2t_info *r2t;
2009
2010 /* flush ctask's r2t queues */
2011 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)))
2012 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
2013 sizeof(void*));
2014
2015 __iscsi_ctask_cleanup(conn, ctask);
2016}
2017
2018static void
2019iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn)
2020{
2021 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2022 struct sock *sk;
2023
2024 if (!tcp_conn->sock)
2025 return;
2026
2027 sk = tcp_conn->sock->sk;
2028 write_lock_bh(&sk->sk_callback_lock);
2029 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2030 write_unlock_bh(&sk->sk_callback_lock);
2031}
2032
2033static void
2034iscsi_tcp_terminate_conn(struct iscsi_conn *conn)
2035{
2036 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2037
2038 if (!tcp_conn->sock)
2039 return;
2040
2041 sock_hold(tcp_conn->sock->sk);
2042 iscsi_conn_restore_callbacks(conn);
2043 sock_put(tcp_conn->sock->sk);
2044
2045 sock_release(tcp_conn->sock);
2046 tcp_conn->sock = NULL;
2047 conn->recv_lock = NULL;
2048}
2049
2050/* called with host lock */ 1897/* called with host lock */
2051static void 1898static void
2052iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, 1899iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
@@ -2057,6 +1904,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
2057 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, 1904 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
2058 sizeof(struct iscsi_hdr)); 1905 sizeof(struct iscsi_hdr));
2059 tcp_mtask->xmstate = XMSTATE_IMM_HDR; 1906 tcp_mtask->xmstate = XMSTATE_IMM_HDR;
1907 tcp_mtask->sent = 0;
2060 1908
2061 if (mtask->data_count) 1909 if (mtask->data_count)
2062 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, 1910 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
@@ -2130,211 +1978,55 @@ iscsi_r2tpool_free(struct iscsi_session *session)
2130 1978
2131static int 1979static int
2132iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, 1980iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2133 uint32_t value) 1981 char *buf, int buflen)
2134{ 1982{
2135 struct iscsi_conn *conn = cls_conn->dd_data; 1983 struct iscsi_conn *conn = cls_conn->dd_data;
2136 struct iscsi_session *session = conn->session; 1984 struct iscsi_session *session = conn->session;
2137 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1985 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1986 int value;
2138 1987
2139 switch(param) { 1988 switch(param) {
2140 case ISCSI_PARAM_MAX_RECV_DLENGTH: {
2141 char *saveptr = tcp_conn->data;
2142 gfp_t flags = GFP_KERNEL;
2143
2144 if (tcp_conn->data_size >= value) {
2145 conn->max_recv_dlength = value;
2146 break;
2147 }
2148
2149 spin_lock_bh(&session->lock);
2150 if (conn->stop_stage == STOP_CONN_RECOVER)
2151 flags = GFP_ATOMIC;
2152 spin_unlock_bh(&session->lock);
2153
2154 if (value <= PAGE_SIZE)
2155 tcp_conn->data = kmalloc(value, flags);
2156 else
2157 tcp_conn->data = (void*)__get_free_pages(flags,
2158 get_order(value));
2159 if (tcp_conn->data == NULL) {
2160 tcp_conn->data = saveptr;
2161 return -ENOMEM;
2162 }
2163 if (tcp_conn->data_size <= PAGE_SIZE)
2164 kfree(saveptr);
2165 else
2166 free_pages((unsigned long)saveptr,
2167 get_order(tcp_conn->data_size));
2168 conn->max_recv_dlength = value;
2169 tcp_conn->data_size = value;
2170 }
2171 break;
2172 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2173 conn->max_xmit_dlength = value;
2174 break;
2175 case ISCSI_PARAM_HDRDGST_EN: 1989 case ISCSI_PARAM_HDRDGST_EN:
2176 conn->hdrdgst_en = value; 1990 iscsi_set_param(cls_conn, param, buf, buflen);
2177 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1991 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
2178 if (conn->hdrdgst_en) { 1992 if (conn->hdrdgst_en)
2179 tcp_conn->hdr_size += sizeof(__u32); 1993 tcp_conn->hdr_size += sizeof(__u32);
2180 if (!tcp_conn->tx_tfm)
2181 tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c",
2182 0);
2183 if (!tcp_conn->tx_tfm)
2184 return -ENOMEM;
2185 if (!tcp_conn->rx_tfm)
2186 tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c",
2187 0);
2188 if (!tcp_conn->rx_tfm) {
2189 crypto_free_tfm(tcp_conn->tx_tfm);
2190 return -ENOMEM;
2191 }
2192 } else {
2193 if (tcp_conn->tx_tfm)
2194 crypto_free_tfm(tcp_conn->tx_tfm);
2195 if (tcp_conn->rx_tfm)
2196 crypto_free_tfm(tcp_conn->rx_tfm);
2197 }
2198 break; 1994 break;
2199 case ISCSI_PARAM_DATADGST_EN: 1995 case ISCSI_PARAM_DATADGST_EN:
2200 conn->datadgst_en = value; 1996 iscsi_set_param(cls_conn, param, buf, buflen);
2201 if (conn->datadgst_en) {
2202 if (!tcp_conn->data_tx_tfm)
2203 tcp_conn->data_tx_tfm =
2204 crypto_alloc_tfm("crc32c", 0);
2205 if (!tcp_conn->data_tx_tfm)
2206 return -ENOMEM;
2207 if (!tcp_conn->data_rx_tfm)
2208 tcp_conn->data_rx_tfm =
2209 crypto_alloc_tfm("crc32c", 0);
2210 if (!tcp_conn->data_rx_tfm) {
2211 crypto_free_tfm(tcp_conn->data_tx_tfm);
2212 return -ENOMEM;
2213 }
2214 } else {
2215 if (tcp_conn->data_tx_tfm)
2216 crypto_free_tfm(tcp_conn->data_tx_tfm);
2217 if (tcp_conn->data_rx_tfm)
2218 crypto_free_tfm(tcp_conn->data_rx_tfm);
2219 }
2220 tcp_conn->sendpage = conn->datadgst_en ? 1997 tcp_conn->sendpage = conn->datadgst_en ?
2221 sock_no_sendpage : tcp_conn->sock->ops->sendpage; 1998 sock_no_sendpage : tcp_conn->sock->ops->sendpage;
2222 break; 1999 break;
2223 case ISCSI_PARAM_INITIAL_R2T_EN:
2224 session->initial_r2t_en = value;
2225 break;
2226 case ISCSI_PARAM_MAX_R2T: 2000 case ISCSI_PARAM_MAX_R2T:
2001 sscanf(buf, "%d", &value);
2227 if (session->max_r2t == roundup_pow_of_two(value)) 2002 if (session->max_r2t == roundup_pow_of_two(value))
2228 break; 2003 break;
2229 iscsi_r2tpool_free(session); 2004 iscsi_r2tpool_free(session);
2230 session->max_r2t = value; 2005 iscsi_set_param(cls_conn, param, buf, buflen);
2231 if (session->max_r2t & (session->max_r2t - 1)) 2006 if (session->max_r2t & (session->max_r2t - 1))
2232 session->max_r2t = roundup_pow_of_two(session->max_r2t); 2007 session->max_r2t = roundup_pow_of_two(session->max_r2t);
2233 if (iscsi_r2tpool_alloc(session)) 2008 if (iscsi_r2tpool_alloc(session))
2234 return -ENOMEM; 2009 return -ENOMEM;
2235 break; 2010 break;
2236 case ISCSI_PARAM_IMM_DATA_EN:
2237 session->imm_data_en = value;
2238 break;
2239 case ISCSI_PARAM_FIRST_BURST:
2240 session->first_burst = value;
2241 break;
2242 case ISCSI_PARAM_MAX_BURST:
2243 session->max_burst = value;
2244 break;
2245 case ISCSI_PARAM_PDU_INORDER_EN:
2246 session->pdu_inorder_en = value;
2247 break;
2248 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2249 session->dataseq_inorder_en = value;
2250 break;
2251 case ISCSI_PARAM_ERL:
2252 session->erl = value;
2253 break;
2254 case ISCSI_PARAM_IFMARKER_EN:
2255 BUG_ON(value);
2256 session->ifmarker_en = value;
2257 break;
2258 case ISCSI_PARAM_OFMARKER_EN:
2259 BUG_ON(value);
2260 session->ofmarker_en = value;
2261 break;
2262 case ISCSI_PARAM_EXP_STATSN:
2263 conn->exp_statsn = value;
2264 break;
2265 default:
2266 break;
2267 }
2268
2269 return 0;
2270}
2271
2272static int
2273iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2274 enum iscsi_param param, uint32_t *value)
2275{
2276 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2277 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2278
2279 switch(param) {
2280 case ISCSI_PARAM_INITIAL_R2T_EN:
2281 *value = session->initial_r2t_en;
2282 break;
2283 case ISCSI_PARAM_MAX_R2T:
2284 *value = session->max_r2t;
2285 break;
2286 case ISCSI_PARAM_IMM_DATA_EN:
2287 *value = session->imm_data_en;
2288 break;
2289 case ISCSI_PARAM_FIRST_BURST:
2290 *value = session->first_burst;
2291 break;
2292 case ISCSI_PARAM_MAX_BURST:
2293 *value = session->max_burst;
2294 break;
2295 case ISCSI_PARAM_PDU_INORDER_EN:
2296 *value = session->pdu_inorder_en;
2297 break;
2298 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2299 *value = session->dataseq_inorder_en;
2300 break;
2301 case ISCSI_PARAM_ERL:
2302 *value = session->erl;
2303 break;
2304 case ISCSI_PARAM_IFMARKER_EN:
2305 *value = session->ifmarker_en;
2306 break;
2307 case ISCSI_PARAM_OFMARKER_EN:
2308 *value = session->ofmarker_en;
2309 break;
2310 default: 2011 default:
2311 return -EINVAL; 2012 return iscsi_set_param(cls_conn, param, buf, buflen);
2312 } 2013 }
2313 2014
2314 return 0; 2015 return 0;
2315} 2016}
2316 2017
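set_param now receives every value as text: numeric parameters are recovered with sscanf(), and MAX_R2T is forced up to a power of two before the R2T pool is rebuilt. A runnable illustration of that clamping; roundup_pow_of_two() is reimplemented here because the kernel macro is not available in user space:

#include <stdio.h>

/* user-space stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	const char *buf = "12";	/* the value arrives as text now */
	int value;
	unsigned int max_r2t;

	sscanf(buf, "%d", &value);
	max_r2t = value;
	if (max_r2t & (max_r2t - 1))	/* not a power of two? */
		max_r2t = roundup_pow_of_two(max_r2t);
	printf("max_r2t %u\n", max_r2t);	/* prints 16 */
	return 0;
}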
2317static int 2018static int
2318iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 2019iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
2319 enum iscsi_param param, uint32_t *value) 2020 enum iscsi_param param, char *buf)
2320{ 2021{
2321 struct iscsi_conn *conn = cls_conn->dd_data; 2022 struct iscsi_conn *conn = cls_conn->dd_data;
2322 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2023 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2323 struct inet_sock *inet; 2024 struct inet_sock *inet;
2025 struct ipv6_pinfo *np;
2026 struct sock *sk;
2027 int len;
2324 2028
2325 switch(param) { 2029 switch(param) {
2326 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2327 *value = conn->max_recv_dlength;
2328 break;
2329 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2330 *value = conn->max_xmit_dlength;
2331 break;
2332 case ISCSI_PARAM_HDRDGST_EN:
2333 *value = conn->hdrdgst_en;
2334 break;
2335 case ISCSI_PARAM_DATADGST_EN:
2336 *value = conn->datadgst_en;
2337 break;
2338 case ISCSI_PARAM_CONN_PORT: 2030 case ISCSI_PARAM_CONN_PORT:
2339 mutex_lock(&conn->xmitmutex); 2031 mutex_lock(&conn->xmitmutex);
2340 if (!tcp_conn->sock) { 2032 if (!tcp_conn->sock) {
@@ -2343,30 +2035,9 @@ iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
2343 } 2035 }
2344 2036
2345 inet = inet_sk(tcp_conn->sock->sk); 2037 inet = inet_sk(tcp_conn->sock->sk);
2346 *value = be16_to_cpu(inet->dport); 2038 len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
2347 mutex_unlock(&conn->xmitmutex); 2039 mutex_unlock(&conn->xmitmutex);
2348 case ISCSI_PARAM_EXP_STATSN:
2349 *value = conn->exp_statsn;
2350 break; 2040 break;
2351 default:
2352 return -EINVAL;
2353 }
2354
2355 return 0;
2356}
2357
2358static int
2359iscsi_conn_get_str_param(struct iscsi_cls_conn *cls_conn,
2360 enum iscsi_param param, char *buf)
2361{
2362 struct iscsi_conn *conn = cls_conn->dd_data;
2363 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2364 struct sock *sk;
2365 struct inet_sock *inet;
2366 struct ipv6_pinfo *np;
2367 int len = 0;
2368
2369 switch (param) {
2370 case ISCSI_PARAM_CONN_ADDRESS: 2041 case ISCSI_PARAM_CONN_ADDRESS:
2371 mutex_lock(&conn->xmitmutex); 2042 mutex_lock(&conn->xmitmutex);
2372 if (!tcp_conn->sock) { 2043 if (!tcp_conn->sock) {
@@ -2388,7 +2059,7 @@ iscsi_conn_get_str_param(struct iscsi_cls_conn *cls_conn,
2388 mutex_unlock(&conn->xmitmutex); 2059 mutex_unlock(&conn->xmitmutex);
2389 break; 2060 break;
2390 default: 2061 default:
2391 return -EINVAL; 2062 return iscsi_conn_get_param(cls_conn, param, buf);
2392 } 2063 }
2393 2064
2394 return len; 2065 return len;
@@ -2468,8 +2139,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
2468} 2139}
2469 2140
2470static struct scsi_host_template iscsi_sht = { 2141static struct scsi_host_template iscsi_sht = {
2471 .name = "iSCSI Initiator over TCP/IP, v" 2142 .name = "iSCSI Initiator over TCP/IP",
2472 ISCSI_TCP_VERSION,
2473 .queuecommand = iscsi_queuecommand, 2143 .queuecommand = iscsi_queuecommand,
2474 .change_queue_depth = iscsi_change_queue_depth, 2144 .change_queue_depth = iscsi_change_queue_depth,
2475 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2145 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
@@ -2501,7 +2171,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
2501 ISCSI_ERL | 2171 ISCSI_ERL |
2502 ISCSI_CONN_PORT | 2172 ISCSI_CONN_PORT |
2503 ISCSI_CONN_ADDRESS | 2173 ISCSI_CONN_ADDRESS |
2504 ISCSI_EXP_STATSN, 2174 ISCSI_EXP_STATSN |
2175 ISCSI_PERSISTENT_PORT |
2176 ISCSI_PERSISTENT_ADDRESS |
2177 ISCSI_TARGET_NAME |
2178 ISCSI_TPGT,
2505 .host_template = &iscsi_sht, 2179 .host_template = &iscsi_sht,
2506 .conndata_size = sizeof(struct iscsi_conn), 2180 .conndata_size = sizeof(struct iscsi_conn),
2507 .max_conn = 1, 2181 .max_conn = 1,
@@ -2514,14 +2188,10 @@ static struct iscsi_transport iscsi_tcp_transport = {
2514 .bind_conn = iscsi_tcp_conn_bind, 2188 .bind_conn = iscsi_tcp_conn_bind,
2515 .destroy_conn = iscsi_tcp_conn_destroy, 2189 .destroy_conn = iscsi_tcp_conn_destroy,
2516 .set_param = iscsi_conn_set_param, 2190 .set_param = iscsi_conn_set_param,
2517 .get_conn_param = iscsi_conn_get_param, 2191 .get_conn_param = iscsi_tcp_conn_get_param,
2518 .get_conn_str_param = iscsi_conn_get_str_param,
2519 .get_session_param = iscsi_session_get_param, 2192 .get_session_param = iscsi_session_get_param,
2520 .start_conn = iscsi_conn_start, 2193 .start_conn = iscsi_conn_start,
2521 .stop_conn = iscsi_conn_stop, 2194 .stop_conn = iscsi_tcp_conn_stop,
2522 /* these are called as part of conn recovery */
2523 .suspend_conn_recv = iscsi_tcp_suspend_conn_rx,
2524 .terminate_conn = iscsi_tcp_terminate_conn,
2525 /* IO */ 2195 /* IO */
2526 .send_pdu = iscsi_conn_send_pdu, 2196 .send_pdu = iscsi_conn_send_pdu,
2527 .get_stats = iscsi_conn_get_stats, 2197 .get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 808302832e68..32736831790e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -31,26 +31,25 @@
31#define IN_PROGRESS_DDIGEST_RECV 0x3 31#define IN_PROGRESS_DDIGEST_RECV 0x3
32 32
33/* xmit state machine */ 33/* xmit state machine */
34#define XMSTATE_IDLE 0x0 34#define XMSTATE_IDLE 0x0
35#define XMSTATE_R_HDR 0x1 35#define XMSTATE_R_HDR 0x1
36#define XMSTATE_W_HDR 0x2 36#define XMSTATE_W_HDR 0x2
37#define XMSTATE_IMM_HDR 0x4 37#define XMSTATE_IMM_HDR 0x4
38#define XMSTATE_IMM_DATA 0x8 38#define XMSTATE_IMM_DATA 0x8
39#define XMSTATE_UNS_INIT 0x10 39#define XMSTATE_UNS_INIT 0x10
40#define XMSTATE_UNS_HDR 0x20 40#define XMSTATE_UNS_HDR 0x20
41#define XMSTATE_UNS_DATA 0x40 41#define XMSTATE_UNS_DATA 0x40
42#define XMSTATE_SOL_HDR 0x80 42#define XMSTATE_SOL_HDR 0x80
43#define XMSTATE_SOL_DATA 0x100 43#define XMSTATE_SOL_DATA 0x100
44#define XMSTATE_W_PAD 0x200 44#define XMSTATE_W_PAD 0x200
45#define XMSTATE_DATA_DIGEST 0x400 45#define XMSTATE_W_RESEND_PAD 0x400
46 46#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
47#define ISCSI_CONN_RCVBUF_MIN 262144 47
48#define ISCSI_CONN_SNDBUF_MIN 262144
49#define ISCSI_PAD_LEN 4 48#define ISCSI_PAD_LEN 4
50#define ISCSI_R2T_MAX 16
51#define ISCSI_SG_TABLESIZE SG_ALL 49#define ISCSI_SG_TABLESIZE SG_ALL
52#define ISCSI_TCP_MAX_CMD_LEN 16 50#define ISCSI_TCP_MAX_CMD_LEN 16
53 51
52struct crypto_hash;
54struct socket; 53struct socket;
55 54
56/* Socket connection receive helper */ 55/* Socket connection receive helper */
@@ -78,17 +77,12 @@ struct iscsi_tcp_conn {
78 char hdrext[4*sizeof(__u16) + 77 char hdrext[4*sizeof(__u16) +
79 sizeof(__u32)]; 78 sizeof(__u32)];
80 int data_copied; 79 int data_copied;
81 char *data; /* data placeholder */
82 int data_size; /* actual recv_dlength */
83 int stop_stage; /* conn_stop() flag: * 80 int stop_stage; /* conn_stop() flag: *
84 * stop to recover, * 81 * stop to recover, *
85 * stop to terminate */ 82 * stop to terminate */
86 /* iSCSI connection-wide sequencing */ 83 /* iSCSI connection-wide sequencing */
87 int hdr_size; /* PDU header size */ 84 int hdr_size; /* PDU header size */
88 85
89 struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */
90 struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */
91
92 /* control data */ 86 /* control data */
93 struct iscsi_tcp_recv in; /* TCP receive context */ 87 struct iscsi_tcp_recv in; /* TCP receive context */
94 int in_progress; /* connection state machine */ 88 int in_progress; /* connection state machine */
@@ -98,9 +92,9 @@ struct iscsi_tcp_conn {
98 void (*old_state_change)(struct sock *); 92 void (*old_state_change)(struct sock *);
99 void (*old_write_space)(struct sock *); 93 void (*old_write_space)(struct sock *);
100 94
101 /* xmit */ 95 /* data and header digests */
102 struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */ 96 struct hash_desc tx_hash; /* CRC32C (Tx) */
103 struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */ 97 struct hash_desc rx_hash; /* CRC32C (Rx) */
104 98
105 /* MIB custom statistics */ 99 /* MIB custom statistics */
106 uint32_t sendpage_failures_cnt; 100 uint32_t sendpage_failures_cnt;
@@ -159,19 +153,15 @@ struct iscsi_tcp_cmd_task {
159 struct scatterlist *bad_sg; /* assert statement */ 153 struct scatterlist *bad_sg; /* assert statement */
160 int sg_count; /* SG's to process */ 154 int sg_count; /* SG's to process */
161 uint32_t exp_r2tsn; 155 uint32_t exp_r2tsn;
162 int r2t_data_count; /* R2T Data-Out bytes */
163 int data_offset; 156 int data_offset;
164 struct iscsi_r2t_info *r2t; /* in progress R2T */ 157 struct iscsi_r2t_info *r2t; /* in progress R2T */
165 struct iscsi_queue r2tpool; 158 struct iscsi_queue r2tpool;
166 struct kfifo *r2tqueue; 159 struct kfifo *r2tqueue;
167 struct iscsi_r2t_info **r2ts; 160 struct iscsi_r2t_info **r2ts;
168 uint32_t datadigest; /* for recover digest */
169 int digest_count; 161 int digest_count;
170 uint32_t immdigest; /* for imm data */ 162 uint32_t immdigest; /* for imm data */
171 struct iscsi_buf immbuf; /* for imm data digest */ 163 struct iscsi_buf immbuf; /* for imm data digest */
172 struct iscsi_data_task *dtask; /* data task in progress*/
173 struct iscsi_data_task unsol_dtask; /* unsol data task */ 164 struct iscsi_data_task unsol_dtask; /* unsol data task */
174 int digest_offset; /* for partial buff digest */
175}; 165};
176 166
177#endif /* ISCSI_H */ 167#endif /* ISCSI_H */
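Both hash_desc transforms compute CRC32C (Castagnoli), the digest iSCSI specifies for header and data digests. For reference, a self-contained bitwise CRC32C that matches what the kernel's "crc32c" transform produces; the standard check value for "123456789" is 0xe3069283:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (reflected polynomial 0x82f63b78), illustration
 * only; the driver gets this from the kernel "crc32c" transform. */
static uint32_t crc32c(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78 : 0);
	}
	return crc ^ 0xffffffff;
}

int main(void)
{
	const char *check = "123456789";

	printf("0x%08x\n", crc32c(check, strlen(check)));	/* 0xe3069283 */
	return 0;
}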
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index fc031c76dade..bfac4441d89f 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -131,7 +131,7 @@ static int jazz_esp_detect(struct scsi_host_template *tpnt)
131 esp->esp_command_dvma = vdma_alloc(CPHYSADDR(cmd_buffer), sizeof (cmd_buffer)); 131 esp->esp_command_dvma = vdma_alloc(CPHYSADDR(cmd_buffer), sizeof (cmd_buffer));
132 132
133 esp->irq = JAZZ_SCSI_IRQ; 133 esp->irq = JAZZ_SCSI_IRQ;
134 request_irq(JAZZ_SCSI_IRQ, esp_intr, SA_INTERRUPT, "JAZZ SCSI", 134 request_irq(JAZZ_SCSI_IRQ, esp_intr, IRQF_DISABLED, "JAZZ SCSI",
135 esp->ehost); 135 esp->ehost);
136 136
137 /* 137 /*
@@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
258{ 258{
259 int sz = sp->use_sg - 1; 259 int sz = sp->use_sg - 1;
260 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 260 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
261 261
262 while(sz >= 0) { 262 while(sz >= 0) {
263 vdma_free(sg[sz].dma_address); 263 vdma_free(sg[sz].dma_address);
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index eb7bd310cc82..f0871c3ac3d9 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -131,7 +131,7 @@ lasi700_probe(struct parisc_device *dev)
131 host->this_id = 7; 131 host->this_id = 7;
132 host->base = base; 132 host->base = base;
133 host->irq = dev->irq; 133 host->irq = dev->irq;
134 if(request_irq(dev->irq, NCR_700_intr, SA_SHIRQ, "lasi700", host)) { 134 if(request_irq(dev->irq, NCR_700_intr, IRQF_SHARED, "lasi700", host)) {
135 printk(KERN_ERR "lasi700: request_irq failed!\n"); 135 printk(KERN_ERR "lasi700: request_irq failed!\n");
136 goto out_put_host; 136 goto out_put_host;
137 } 137 }
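The jazz_esp and lasi700 hunks are part of the tree-wide rename of the SA_* interrupt flags to IRQF_* (SA_INTERRUPT becomes IRQF_DISABLED, SA_SHIRQ becomes IRQF_SHARED); the shape of the request_irq() call is unchanged. A kernel-style sketch assuming the two-argument handler prototype of this kernel generation (the driver and handler names are hypothetical):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* hypothetical handler; IRQ_HANDLED tells the core we consumed it */
static irqreturn_t mydrv_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int mydrv_setup_irq(unsigned int irq, void *dev)
{
	/* SA_SHIRQ -> IRQF_SHARED; SA_INTERRUPT -> IRQF_DISABLED */
	if (request_irq(irq, mydrv_intr, IRQF_SHARED, "mydrv", dev)) {
		printk(KERN_ERR "mydrv: request_irq failed!\n");
		return -EBUSY;
	}
	return 0;
}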
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
deleted file mode 100644
index 004e1a0d8b71..000000000000
--- a/drivers/scsi/libata-bmdma.c
+++ /dev/null
@@ -1,1150 +0,0 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/pci.h>
38#include <linux/libata.h>
39
40#include "libata.h"
41
42/**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54{
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98}
99
100/**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112{
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156}
157
158
159/**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181{
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186}
187
188/**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201{
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206}
207
208
209/**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * FIXME: missing write posting for 400nS delay enforcement
218 *
219 * LOCKING:
220 * spin_lock_irqsave(host_set lock)
221 */
222
223static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
224{
225 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
226
227 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
228 ata_pause(ap);
229}
230
231
232/**
233 * ata_exec_command - issue ATA command to host controller
234 * @ap: port to which command is being issued
235 * @tf: ATA taskfile register set
236 *
237 * Issues PIO/MMIO write to ATA command register, with proper
238 * synchronization with interrupt handler / other threads.
239 *
240 * LOCKING:
241 * spin_lock_irqsave(host_set lock)
242 */
243void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
244{
245 if (ap->flags & ATA_FLAG_MMIO)
246 ata_exec_command_mmio(ap, tf);
247 else
248 ata_exec_command_pio(ap, tf);
249}
250
251/**
252 * ata_tf_read_pio - input device's ATA taskfile shadow registers
253 * @ap: Port from which input is read
254 * @tf: ATA taskfile register set for storing input
255 *
256 * Reads ATA taskfile registers for currently-selected device
257 * into @tf.
258 *
259 * LOCKING:
260 * Inherited from caller.
261 */
262
263static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
264{
265 struct ata_ioports *ioaddr = &ap->ioaddr;
266
267 tf->command = ata_check_status(ap);
268 tf->feature = inb(ioaddr->error_addr);
269 tf->nsect = inb(ioaddr->nsect_addr);
270 tf->lbal = inb(ioaddr->lbal_addr);
271 tf->lbam = inb(ioaddr->lbam_addr);
272 tf->lbah = inb(ioaddr->lbah_addr);
273 tf->device = inb(ioaddr->device_addr);
274
275 if (tf->flags & ATA_TFLAG_LBA48) {
276 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
277 tf->hob_feature = inb(ioaddr->error_addr);
278 tf->hob_nsect = inb(ioaddr->nsect_addr);
279 tf->hob_lbal = inb(ioaddr->lbal_addr);
280 tf->hob_lbam = inb(ioaddr->lbam_addr);
281 tf->hob_lbah = inb(ioaddr->lbah_addr);
282 }
283}
284
285/**
286 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
287 * @ap: Port from which input is read
288 * @tf: ATA taskfile register set for storing input
289 *
290 * Reads ATA taskfile registers for currently-selected device
291 * into @tf via MMIO.
292 *
293 * LOCKING:
294 * Inherited from caller.
295 */
296
297static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
298{
299 struct ata_ioports *ioaddr = &ap->ioaddr;
300
301 tf->command = ata_check_status(ap);
302 tf->feature = readb((void __iomem *)ioaddr->error_addr);
303 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
304 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
305 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
306 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
307 tf->device = readb((void __iomem *)ioaddr->device_addr);
308
309 if (tf->flags & ATA_TFLAG_LBA48) {
310 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
311 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
312 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
313 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
314 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
315 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
316 }
317}
318
319
320/**
321 * ata_tf_read - input device's ATA taskfile shadow registers
322 * @ap: Port from which input is read
323 * @tf: ATA taskfile register set for storing input
324 *
325 * Reads ATA taskfile registers for currently-selected device
326 * into @tf.
327 *
328 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
329 * is set, also reads the hob registers.
330 *
331 * May be used as the tf_read() entry in ata_port_operations.
332 *
333 * LOCKING:
334 * Inherited from caller.
335 */
336void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
337{
338 if (ap->flags & ATA_FLAG_MMIO)
339 ata_tf_read_mmio(ap, tf);
340 else
341 ata_tf_read_pio(ap, tf);
342}
343
344/**
345 * ata_check_status_pio - Read device status reg & clear interrupt
346 * @ap: port where the device is
347 *
348 * Reads ATA taskfile status register for currently-selected device
349 * and return its value. This also clears pending interrupts
350 * from this device
351 *
352 * LOCKING:
353 * Inherited from caller.
354 */
355static u8 ata_check_status_pio(struct ata_port *ap)
356{
357 return inb(ap->ioaddr.status_addr);
358}
359
360/**
361 * ata_check_status_mmio - Read device status reg & clear interrupt
362 * @ap: port where the device is
363 *
364 * Reads ATA taskfile status register for currently-selected device
365 * via MMIO and return its value. This also clears pending interrupts
366 * from this device
367 *
368 * LOCKING:
369 * Inherited from caller.
370 */
371static u8 ata_check_status_mmio(struct ata_port *ap)
372{
373 return readb((void __iomem *) ap->ioaddr.status_addr);
374}
375
376
377/**
378 * ata_check_status - Read device status reg & clear interrupt
379 * @ap: port where the device is
380 *
381 * Reads ATA taskfile status register for currently-selected device
382 * and return its value. This also clears pending interrupts
383 * from this device
384 *
385 * May be used as the check_status() entry in ata_port_operations.
386 *
387 * LOCKING:
388 * Inherited from caller.
389 */
390u8 ata_check_status(struct ata_port *ap)
391{
392 if (ap->flags & ATA_FLAG_MMIO)
393 return ata_check_status_mmio(ap);
394 return ata_check_status_pio(ap);
395}
396
397
398/**
399 * ata_altstatus - Read device alternate status reg
400 * @ap: port where the device is
401 *
402 * Reads ATA taskfile alternate status register for
403 * currently-selected device and return its value.
404 *
405 * Note: may NOT be used as the check_altstatus() entry in
406 * ata_port_operations.
407 *
408 * LOCKING:
409 * Inherited from caller.
410 */
411u8 ata_altstatus(struct ata_port *ap)
412{
413 if (ap->ops->check_altstatus)
414 return ap->ops->check_altstatus(ap);
415
416 if (ap->flags & ATA_FLAG_MMIO)
417 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
418 return inb(ap->ioaddr.altstatus_addr);
419}
420
421/**
422 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
423 * @qc: Info associated with this ATA transaction.
424 *
425 * LOCKING:
426 * spin_lock_irqsave(host_set lock)
427 */
428
429static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
430{
431 struct ata_port *ap = qc->ap;
432 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
433 u8 dmactl;
434 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
435
436 /* load PRD table addr. */
437 mb(); /* make sure PRD table writes are visible to controller */
438 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
439
440 /* specify data direction, triple-check start bit is clear */
441 dmactl = readb(mmio + ATA_DMA_CMD);
442 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
443 if (!rw)
444 dmactl |= ATA_DMA_WR;
445 writeb(dmactl, mmio + ATA_DMA_CMD);
446
447 /* issue r/w command */
448 ap->ops->exec_command(ap, &qc->tf);
449}
450
451/**
452 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
453 * @qc: Info associated with this ATA transaction.
454 *
455 * LOCKING:
456 * spin_lock_irqsave(host_set lock)
457 */
458
459static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
460{
461 struct ata_port *ap = qc->ap;
462 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
463 u8 dmactl;
464
465 /* start host DMA transaction */
466 dmactl = readb(mmio + ATA_DMA_CMD);
467 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
468
469 /* Strictly, one may wish to issue a readb() here, to
470 * flush the mmio write. However, control also passes
471 * to the hardware at this point, and it will interrupt
472 * us when we are to resume control. So, in effect,
473 * we don't care when the mmio write flushes.
474 * Further, a read of the DMA status register _immediately_
475 * following the write may not be what certain flaky hardware
476 * is expected, so I think it is best to not add a readb()
477 * expects, so I think it is best to not add a readb()
478 * without first testing all the MMIO ATA cards/mobos.
479 */
480}
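The comment above weighs flushing the posted MMIO write against flaky hardware that dislikes an immediate status read, and deliberately leaves the write unflushed. Had the opposite call been made, the flush would be a single dummy read back of the same register; a hedged fragment of what the code intentionally does not do:

#include <linux/types.h>
#include <linux/ata.h>
#include <asm/io.h>

/* hypothetical variant: flush the posted ATA_DMA_START write */
static void bmdma_start_flushed(void __iomem *mmio, u8 dmactl)
{
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
	(void) readb(mmio + ATA_DMA_CMD);	/* force the write to the device */
}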
481
482/**
483 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
484 * @qc: Info associated with this ATA transaction.
485 *
486 * LOCKING:
487 * spin_lock_irqsave(host_set lock)
488 */
489
490static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
491{
492 struct ata_port *ap = qc->ap;
493 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
494 u8 dmactl;
495
496 /* load PRD table addr. */
497 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
498
499 /* specify data direction, triple-check start bit is clear */
500 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
501 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
502 if (!rw)
503 dmactl |= ATA_DMA_WR;
504 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
505
506 /* issue r/w command */
507 ap->ops->exec_command(ap, &qc->tf);
508}
509
510/**
511 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
512 * @qc: Info associated with this ATA transaction.
513 *
514 * LOCKING:
515 * spin_lock_irqsave(host_set lock)
516 */
517
518static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
519{
520 struct ata_port *ap = qc->ap;
521 u8 dmactl;
522
523 /* start host DMA transaction */
524 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
525 outb(dmactl | ATA_DMA_START,
526 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
527}
528
529
530/**
531 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
532 * @qc: Info associated with this ATA transaction.
533 *
534 * Writes the ATA_DMA_START flag to the DMA command register.
535 *
536 * May be used as the bmdma_start() entry in ata_port_operations.
537 *
538 * LOCKING:
539 * spin_lock_irqsave(host_set lock)
540 */
541void ata_bmdma_start(struct ata_queued_cmd *qc)
542{
543 if (qc->ap->flags & ATA_FLAG_MMIO)
544 ata_bmdma_start_mmio(qc);
545 else
546 ata_bmdma_start_pio(qc);
547}
548
549
550/**
551 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
552 * @qc: Info associated with this ATA transaction.
553 *
554 * Writes address of PRD table to device's PRD Table Address
555 * register, sets the DMA control register, and calls
556 * ops->exec_command() to start the transfer.
557 *
558 * May be used as the bmdma_setup() entry in ata_port_operations.
559 *
560 * LOCKING:
561 * spin_lock_irqsave(host_set lock)
562 */
563void ata_bmdma_setup(struct ata_queued_cmd *qc)
564{
565 if (qc->ap->flags & ATA_FLAG_MMIO)
566 ata_bmdma_setup_mmio(qc);
567 else
568 ata_bmdma_setup_pio(qc);
569}
570
571
572/**
573 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
574 * @ap: Port associated with this ATA transaction.
575 *
576 * Clear interrupt and error flags in DMA status register.
577 *
578 * May be used as the irq_clear() entry in ata_port_operations.
579 *
580 * LOCKING:
581 * spin_lock_irqsave(host_set lock)
582 */
583
584void ata_bmdma_irq_clear(struct ata_port *ap)
585{
586 if (!ap->ioaddr.bmdma_addr)
587 return;
588
589 if (ap->flags & ATA_FLAG_MMIO) {
590 void __iomem *mmio =
591 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
592 writeb(readb(mmio), mmio);
593 } else {
594 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
595 outb(inb(addr), addr);
596 }
597}
598
599
600/**
601 * ata_bmdma_status - Read PCI IDE BMDMA status
602 * @ap: Port associated with this ATA transaction.
603 *
604 * Read and return BMDMA status register.
605 *
606 * May be used as the bmdma_status() entry in ata_port_operations.
607 *
608 * LOCKING:
609 * spin_lock_irqsave(host_set lock)
610 */
611
612u8 ata_bmdma_status(struct ata_port *ap)
613{
614 u8 host_stat;
615 if (ap->flags & ATA_FLAG_MMIO) {
616 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
617 host_stat = readb(mmio + ATA_DMA_STATUS);
618 } else
619 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
620 return host_stat;
621}
622
623
624/**
625 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
626 * @qc: Command we are ending DMA for
627 *
628 * Clears the ATA_DMA_START flag in the dma control register
629 *
630 * May be used as the bmdma_stop() entry in ata_port_operations.
631 *
632 * LOCKING:
633 * spin_lock_irqsave(host_set lock)
634 */
635
636void ata_bmdma_stop(struct ata_queued_cmd *qc)
637{
638 struct ata_port *ap = qc->ap;
639 if (ap->flags & ATA_FLAG_MMIO) {
640 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
641
642 /* clear start/stop bit */
643 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
644 mmio + ATA_DMA_CMD);
645 } else {
646 /* clear start/stop bit */
647 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
648 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
649 }
650
651 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
652 ata_altstatus(ap); /* dummy read */
653}
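
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the stock helpers above are meant to be plugged straight
 * into a low-level driver's ata_port_operations. The names below are
 * invented and the taskfile/reset methods are elided.
 */
static const struct ata_port_operations example_bmdma_ops = {
	.bmdma_setup	= ata_bmdma_setup,	/* PRD address + direction */
	.bmdma_start	= ata_bmdma_start,	/* set ATA_DMA_START */
	.bmdma_stop	= ata_bmdma_stop,	/* clear ATA_DMA_START */
	.bmdma_status	= ata_bmdma_status,	/* read ATA_DMA_STATUS */
	.irq_clear	= ata_bmdma_irq_clear,
	/* ... remaining methods elided ... */
};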
654
655/**
656 * ata_bmdma_freeze - Freeze BMDMA controller port
657 * @ap: port to freeze
658 *
659 * Freeze BMDMA controller port.
660 *
661 * LOCKING:
662 * Inherited from caller.
663 */
664void ata_bmdma_freeze(struct ata_port *ap)
665{
666 struct ata_ioports *ioaddr = &ap->ioaddr;
667
668 ap->ctl |= ATA_NIEN;
669 ap->last_ctl = ap->ctl;
670
671 if (ap->flags & ATA_FLAG_MMIO)
672 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
673 else
674 outb(ap->ctl, ioaddr->ctl_addr);
675}
676
677/**
678 * ata_bmdma_thaw - Thaw BMDMA controller port
679 * @ap: port to thaw
680 *
681 * Thaw BMDMA controller port.
682 *
683 * LOCKING:
684 * Inherited from caller.
685 */
686void ata_bmdma_thaw(struct ata_port *ap)
687{
688 /* clear & re-enable interrupts */
689 ata_chk_status(ap);
690 ap->ops->irq_clear(ap);
691 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
692 ata_irq_on(ap);
693}
694
695/**
696 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
697 * @ap: port to handle error for
698 * @prereset: prereset method (can be NULL)
699 * @softreset: softreset method (can be NULL)
700 * @hardreset: hardreset method (can be NULL)
701 * @postreset: postreset method (can be NULL)
702 *
703 * Handle error for ATA BMDMA controller. It can handle both
704 * PATA and SATA controllers. Many controllers should be able to
705 * use this EH as-is or with some added handling before and
706 * after.
707 *
708 * This function is intended to be used for constructing the
709 * ->error_handler callback of low-level drivers.
710 *
711 * LOCKING:
712 * Kernel thread context (may sleep)
713 */
714void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
715 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
716 ata_postreset_fn_t postreset)
717{
718 struct ata_eh_context *ehc = &ap->eh_context;
719 struct ata_queued_cmd *qc;
720 unsigned long flags;
721 int thaw = 0;
722
723 qc = __ata_qc_from_tag(ap, ap->active_tag);
724 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
725 qc = NULL;
726
727 /* reset PIO HSM and stop DMA engine */
728 spin_lock_irqsave(ap->lock, flags);
729
730 ap->hsm_task_state = HSM_ST_IDLE;
731
732 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
733 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
734 u8 host_stat;
735
736 host_stat = ata_bmdma_status(ap);
737
738 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
739
740 /* BMDMA controllers indicate host bus error by
741 * setting the DMA_ERR bit and timing out. As it wasn't
742 * really a timeout event, adjust the error mask and
743 * cancel frozen state.
744 */
745 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
746 qc->err_mask = AC_ERR_HOST_BUS;
747 thaw = 1;
748 }
749
750 ap->ops->bmdma_stop(qc);
751 }
752
753 ata_altstatus(ap);
754 ata_chk_status(ap);
755 ap->ops->irq_clear(ap);
756
757 spin_unlock_irqrestore(ap->lock, flags);
758
759 if (thaw)
760 ata_eh_thaw_port(ap);
761
762 /* PIO and DMA engines have been stopped, perform recovery */
763 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
764}
765
766/**
767 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
768 * @ap: port to handle error for
769 *
770 * Stock error handler for BMDMA controller.
771 *
772 * LOCKING:
773 * Kernel thread context (may sleep)
774 */
775void ata_bmdma_error_handler(struct ata_port *ap)
776{
777 ata_reset_fn_t hardreset;
778
779 hardreset = NULL;
780 if (sata_scr_valid(ap))
781 hardreset = sata_std_hardreset;
782
783 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
784 ata_std_postreset);
785}
786
787/**
788 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
789 * BMDMA controller
790 * @qc: internal command to clean up
791 *
792 * LOCKING:
793 * Kernel thread context (may sleep)
794 */
795void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
796{
797 ata_bmdma_stop(qc);
798}
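
/*
 * Illustrative sketch (editor's addition): the stock EH entries above
 * slot into ata_port_operations the same way; with valid SCR access,
 * ata_bmdma_error_handler() picks up sata_std_hardreset automatically.
 * The name below is invented and the other methods are elided.
 */
static const struct ata_port_operations example_bmdma_eh_ops = {
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	/* ... remaining methods elided ... */
};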
799
800#ifdef CONFIG_PCI
801static struct ata_probe_ent *
802ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
803{
804 struct ata_probe_ent *probe_ent;
805
806 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
807 if (!probe_ent) {
808 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
809 kobject_name(&(dev->kobj)));
810 return NULL;
811 }
812
813 INIT_LIST_HEAD(&probe_ent->node);
814 probe_ent->dev = dev;
815
816 probe_ent->sht = port->sht;
817 probe_ent->host_flags = port->host_flags;
818 probe_ent->pio_mask = port->pio_mask;
819 probe_ent->mwdma_mask = port->mwdma_mask;
820 probe_ent->udma_mask = port->udma_mask;
821 probe_ent->port_ops = port->port_ops;
822
823 return probe_ent;
824}
825
826
827/**
828 * ata_pci_init_native_mode - Initialize native-mode driver
829 * @pdev: pci device to be initialized
830 * @port: array[2] of pointers to port info structures.
831 * @ports: bitmap of ports present
832 *
833 * Utility function which allocates and initializes an
834 * ata_probe_ent structure for a standard dual-port
835 * PIO-based IDE controller. The returned ata_probe_ent
836 * structure can be passed to ata_device_add(); once registration
837 * completes, the caller should free it with kfree().
838 *
839 * The caller need only pass the address of the primary port; the
840 * secondary will be deduced automatically. If the device has
841 * non-standard secondary port mappings, this function can be called twice,
842 * once for each interface.
843 */
844
845struct ata_probe_ent *
846ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
847{
848 struct ata_probe_ent *probe_ent =
849 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
850 int p = 0;
851 unsigned long bmdma;
852
853 if (!probe_ent)
854 return NULL;
855
856 probe_ent->irq = pdev->irq;
857 probe_ent->irq_flags = SA_SHIRQ;
858 probe_ent->private_data = port[0]->private_data;
859
860 if (ports & ATA_PORT_PRIMARY) {
861 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
862 probe_ent->port[p].altstatus_addr =
863 probe_ent->port[p].ctl_addr =
864 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
865 bmdma = pci_resource_start(pdev, 4);
866 if (bmdma) {
867 if (inb(bmdma + 2) & 0x80)
868 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
869 probe_ent->port[p].bmdma_addr = bmdma;
870 }
871 ata_std_ports(&probe_ent->port[p]);
872 p++;
873 }
874
875 if (ports & ATA_PORT_SECONDARY) {
876 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
877 probe_ent->port[p].altstatus_addr =
878 probe_ent->port[p].ctl_addr =
879 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
880 bmdma = pci_resource_start(pdev, 4);
881 if (bmdma) {
882 bmdma += 8;
883 if (inb(bmdma + 2) & 0x80)
884 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
885 probe_ent->port[p].bmdma_addr = bmdma;
886 }
887 ata_std_ports(&probe_ent->port[p]);
888 p++;
889 }
890
891 probe_ent->n_ports = p;
892 return probe_ent;
893}
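
/*
 * Usage sketch (editor's addition, hypothetical): per the comment
 * above, the returned probe_ent is handed to ata_device_add() and
 * then freed by the caller. "example_native_attach" is an invented
 * name.
 */
static int example_native_attach(struct pci_dev *pdev,
				 struct ata_port_info *pinfo)
{
	struct ata_port_info *port[2] = { pinfo, pinfo };
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_pci_init_native_mode(pdev, port,
				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	ata_device_add(probe_ent);	/* registers both ports */
	kfree(probe_ent);		/* no longer needed afterwards */
	return 0;
}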
894
895
896static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
897 struct ata_port_info *port, int port_num)
898{
899 struct ata_probe_ent *probe_ent;
900 unsigned long bmdma;
901
902 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
903 if (!probe_ent)
904 return NULL;
905
906 probe_ent->legacy_mode = 1;
907 probe_ent->n_ports = 1;
908 probe_ent->hard_port_no = port_num;
909 probe_ent->private_data = port->private_data;
910
911 switch(port_num)
912 {
913 case 0:
914 probe_ent->irq = 14;
915 probe_ent->port[0].cmd_addr = 0x1f0;
916 probe_ent->port[0].altstatus_addr =
917 probe_ent->port[0].ctl_addr = 0x3f6;
918 break;
919 case 1:
920 probe_ent->irq = 15;
921 probe_ent->port[0].cmd_addr = 0x170;
922 probe_ent->port[0].altstatus_addr =
923 probe_ent->port[0].ctl_addr = 0x376;
924 break;
925 }
926
927 bmdma = pci_resource_start(pdev, 4);
928 if (bmdma != 0) {
929 bmdma += 8 * port_num;
930 probe_ent->port[0].bmdma_addr = bmdma;
931 if (inb(bmdma + 2) & 0x80)
932 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
933 }
934 ata_std_ports(&probe_ent->port[0]);
935
936 return probe_ent;
937}
938
939
940/**
941 * ata_pci_init_one - Initialize/register PCI IDE host controller
942 * @pdev: Controller to be initialized
943 * @port_info: Information from low-level host driver
944 * @n_ports: Number of ports attached to host controller
945 *
946 * This is a helper function which can be called from a driver's
947 * xxx_init_one() probe function if the hardware uses traditional
948 * IDE taskfile registers.
949 *
950 * This function calls pci_enable_device(), reserves its register
951 * regions, sets the dma mask, enables bus master mode, and calls
952 * ata_device_add()
953 *
954 * LOCKING:
955 * Inherited from PCI layer (may sleep).
956 *
957 * RETURNS:
958 * Zero on success, negative errno-based value on error.
959 */
960
961int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
962 unsigned int n_ports)
963{
964 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
965 struct ata_port_info *port[2];
966 u8 tmp8, mask;
967 unsigned int legacy_mode = 0;
968 int disable_dev_on_err = 1;
969 int rc;
970
971 DPRINTK("ENTER\n");
972
973 port[0] = port_info[0];
974 if (n_ports > 1)
975 port[1] = port_info[1];
976 else
977 port[1] = port[0];
978
979 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
980 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
981 /* TODO: What if one channel is in native mode ... */
982 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
983 mask = (1 << 2) | (1 << 0);
984 if ((tmp8 & mask) != mask)
985 legacy_mode = (1 << 3);
986 }
987
988 /* FIXME... */
989 if ((!legacy_mode) && (n_ports > 2)) {
990 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
991 n_ports = 2;
992 /* For now */
993 }
994
995 /* FIXME: Really, for ATA this isn't safe because the device may be
996 multi-purpose and we want to leave it alone if it was already
997 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
998
999 Checking dev->is_enabled is insufficient, as it is not set at
1000 boot for the primary video device, which is enabled by the BIOS.
1001 */
1002
1003 rc = pci_enable_device(pdev);
1004 if (rc)
1005 return rc;
1006
1007 rc = pci_request_regions(pdev, DRV_NAME);
1008 if (rc) {
1009 disable_dev_on_err = 0;
1010 goto err_out;
1011 }
1012
1013 /* FIXME: Should use platform specific mappers for legacy port ranges */
1014 if (legacy_mode) {
1015 if (!request_region(0x1f0, 8, "libata")) {
1016 struct resource *conflict, res;
1017 res.start = 0x1f0;
1018 res.end = 0x1f0 + 8 - 1;
1019 conflict = ____request_resource(&ioport_resource, &res);
1020 if (!strcmp(conflict->name, "libata"))
1021 legacy_mode |= (1 << 0);
1022 else {
1023 disable_dev_on_err = 0;
1024 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
1025 }
1026 } else
1027 legacy_mode |= (1 << 0);
1028
1029 if (!request_region(0x170, 8, "libata")) {
1030 struct resource *conflict, res;
1031 res.start = 0x170;
1032 res.end = 0x170 + 8 - 1;
1033 conflict = ____request_resource(&ioport_resource, &res);
1034 if (!strcmp(conflict->name, "libata"))
1035 legacy_mode |= (1 << 1);
1036 else {
1037 disable_dev_on_err = 0;
1038 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
1039 }
1040 } else
1041 legacy_mode |= (1 << 1);
1042 }
1043
1044 /* we have legacy mode, but all ports are unavailable */
1045 if (legacy_mode == (1 << 3)) {
1046 rc = -EBUSY;
1047 goto err_out_regions;
1048 }
1049
1050 /* FIXME: If we get no DMA mask we should fall back to PIO */
1051 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1052 if (rc)
1053 goto err_out_regions;
1054 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1055 if (rc)
1056 goto err_out_regions;
1057
1058 if (legacy_mode) {
1059 if (legacy_mode & (1 << 0))
1060 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
1061 if (legacy_mode & (1 << 1))
1062 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
1063 } else {
1064 if (n_ports == 2)
1065 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1066 else
1067 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1068 }
1069 if (!probe_ent && !probe_ent2) {
1070 rc = -ENOMEM;
1071 goto err_out_regions;
1072 }
1073
1074 pci_set_master(pdev);
1075
1076 /* FIXME: check ata_device_add return */
1077 if (legacy_mode) {
1078 struct device *dev = &pdev->dev;
1079 struct ata_host_set *host_set = NULL;
1080
1081 if (legacy_mode & (1 << 0)) {
1082 ata_device_add(probe_ent);
1083 host_set = dev_get_drvdata(dev);
1084 }
1085
1086 if (legacy_mode & (1 << 1)) {
1087 ata_device_add(probe_ent2);
1088 if (host_set) {
1089 host_set->next = dev_get_drvdata(dev);
1090 dev_set_drvdata(dev, host_set);
1091 }
1092 }
1093 } else
1094 ata_device_add(probe_ent);
1095
1096 kfree(probe_ent);
1097 kfree(probe_ent2);
1098
1099 return 0;
1100
1101err_out_regions:
1102 if (legacy_mode & (1 << 0))
1103 release_region(0x1f0, 8);
1104 if (legacy_mode & (1 << 1))
1105 release_region(0x170, 8);
1106 pci_release_regions(pdev);
1107err_out:
1108 if (disable_dev_on_err)
1109 pci_disable_device(pdev);
1110 return rc;
1111}
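
/*
 * Usage sketch (editor's addition, hypothetical): a PCI IDE driver's
 * probe routine usually reduces to an ata_port_info plus a call to
 * ata_pci_init_one(). "example_sht" and "example_ops" are placeholders
 * standing in for a real scsi_host_template and ata_port_operations.
 */
static struct scsi_host_template example_sht;		/* placeholder */
static struct ata_port_operations example_ops;		/* placeholder */

static struct ata_port_info example_port_info = {
	.sht		= &example_sht,
	.host_flags	= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,		/* PIO 0-4 */
	.mwdma_mask	= 0x07,		/* MWDMA 0-2 */
	.udma_mask	= 0x3f,		/* UDMA 0-5 */
	.port_ops	= &example_ops,
};

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct ata_port_info *ppi = &example_port_info;

	return ata_pci_init_one(pdev, &ppi, 1);
}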
1112
1113/**
1114 * ata_pci_clear_simplex - attempt to kick device out of simplex
1115 * @pdev: PCI device
1116 *
1117 * Some PCI ATA devices report simplex mode but in fact can be told to
1118 * enter non-simplex mode. This implements the necessary logic to
1119 * perform the task on such devices. Calling it on other devices will
1120 * have -undefined- behaviour.
1121 */
1122
1123int ata_pci_clear_simplex(struct pci_dev *pdev)
1124{
1125 unsigned long bmdma = pci_resource_start(pdev, 4);
1126 u8 simplex;
1127
1128 if (bmdma == 0)
1129 return -ENOENT;
1130
1131 simplex = inb(bmdma + 0x02);
1132 outb(simplex & 0x60, bmdma + 0x02);
1133 simplex = inb(bmdma + 0x02);
1134 if (simplex & 0x80)
1135 return -EOPNOTSUPP;
1136 return 0;
1137}
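
/*
 * Usage sketch (editor's addition, hypothetical): a driver that knows
 * its controller merely reports simplex can try to clear the bit
 * before falling back to one-channel-at-a-time DMA.
 */
static void example_try_clear_simplex(struct pci_dev *pdev)
{
	if (ata_pci_clear_simplex(pdev))
		printk(KERN_INFO DRV_NAME ": device is simplex-only\n");
}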
1138
1139unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1140{
1141 /* Filter out DMA modes if the device has been configured by
1142 the BIOS as PIO only */
1143
1144 if (ap->ioaddr.bmdma_addr == 0)
1145 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1146 return xfer_mask;
1147}
1148
1149#endif /* CONFIG_PCI */
1150
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
deleted file mode 100644
index 6c66877be2bf..000000000000
--- a/drivers/scsi/libata-core.c
+++ /dev/null
@@ -1,5916 +0,0 @@
1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/init.h>
40#include <linux/list.h>
41#include <linux/mm.h>
42#include <linux/highmem.h>
43#include <linux/spinlock.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h>
46#include <linux/timer.h>
47#include <linux/interrupt.h>
48#include <linux/completion.h>
49#include <linux/suspend.h>
50#include <linux/workqueue.h>
51#include <linux/jiffies.h>
52#include <linux/scatterlist.h>
53#include <scsi/scsi.h>
54#include "scsi_priv.h"
55#include <scsi/scsi_cmnd.h>
56#include <scsi/scsi_host.h>
57#include <linux/libata.h>
58#include <asm/io.h>
59#include <asm/semaphore.h>
60#include <asm/byteorder.h>
61
62#include "libata.h"
63
64/* debounce timing parameters in msecs { interval, duration, timeout } */
65const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
66const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
67const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
68
69static unsigned int ata_dev_init_params(struct ata_device *dev,
70 u16 heads, u16 sectors);
71static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
72static void ata_dev_xfermask(struct ata_device *dev);
73
74static unsigned int ata_unique_id = 1;
75static struct workqueue_struct *ata_wq;
76
77struct workqueue_struct *ata_aux_wq;
78
79int atapi_enabled = 1;
80module_param(atapi_enabled, int, 0444);
81MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
82
83int atapi_dmadir = 0;
84module_param(atapi_dmadir, int, 0444);
85MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
86
87int libata_fua = 0;
88module_param_named(fua, libata_fua, int, 0444);
89MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
90
91MODULE_AUTHOR("Jeff Garzik");
92MODULE_DESCRIPTION("Library module for ATA devices");
93MODULE_LICENSE("GPL");
94MODULE_VERSION(DRV_VERSION);
95
96
97/**
98 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
99 * @tf: Taskfile to convert
100 * @fis: Buffer into which data will be output
101 * @pmp: Port multiplier port
102 *
103 * Converts a standard ATA taskfile to a Serial ATA
104 * FIS structure (Register - Host to Device).
105 *
106 * LOCKING:
107 * Inherited from caller.
108 */
109
110void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
111{
112 fis[0] = 0x27; /* Register - Host to Device FIS */
113 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
114 bit 7 indicates Command FIS */
115 fis[2] = tf->command;
116 fis[3] = tf->feature;
117
118 fis[4] = tf->lbal;
119 fis[5] = tf->lbam;
120 fis[6] = tf->lbah;
121 fis[7] = tf->device;
122
123 fis[8] = tf->hob_lbal;
124 fis[9] = tf->hob_lbam;
125 fis[10] = tf->hob_lbah;
126 fis[11] = tf->hob_feature;
127
128 fis[12] = tf->nsect;
129 fis[13] = tf->hob_nsect;
130 fis[14] = 0;
131 fis[15] = tf->ctl;
132
133 fis[16] = 0;
134 fis[17] = 0;
135 fis[18] = 0;
136 fis[19] = 0;
137}
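
/*
 * Illustrative sketch (editor's addition): building a command FIS.
 * After the call, fis[0] is 0x27 (Register - Host to Device), fis[1]
 * is 0x80 (C bit set, PMP 0), fis[2] carries the command, and the
 * LBA/count fields follow the layout shown above.
 */
static void example_build_fis(const struct ata_taskfile *tf, u8 *fis)
{
	/* fis must point at a buffer of at least 20 bytes */
	ata_tf_to_fis(tf, fis, 0);	/* PMP 0 */
}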
138
139/**
140 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
141 * @fis: Buffer from which data will be input
142 * @tf: Taskfile to output
143 *
144 * Converts a serial ATA FIS structure to a standard ATA taskfile.
145 *
146 * LOCKING:
147 * Inherited from caller.
148 */
149
150void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
151{
152 tf->command = fis[2]; /* status */
153 tf->feature = fis[3]; /* error */
154
155 tf->lbal = fis[4];
156 tf->lbam = fis[5];
157 tf->lbah = fis[6];
158 tf->device = fis[7];
159
160 tf->hob_lbal = fis[8];
161 tf->hob_lbam = fis[9];
162 tf->hob_lbah = fis[10];
163
164 tf->nsect = fis[12];
165 tf->hob_nsect = fis[13];
166}
167
168static const u8 ata_rw_cmds[] = {
169 /* pio multi */
170 ATA_CMD_READ_MULTI,
171 ATA_CMD_WRITE_MULTI,
172 ATA_CMD_READ_MULTI_EXT,
173 ATA_CMD_WRITE_MULTI_EXT,
174 0,
175 0,
176 0,
177 ATA_CMD_WRITE_MULTI_FUA_EXT,
178 /* pio */
179 ATA_CMD_PIO_READ,
180 ATA_CMD_PIO_WRITE,
181 ATA_CMD_PIO_READ_EXT,
182 ATA_CMD_PIO_WRITE_EXT,
183 0,
184 0,
185 0,
186 0,
187 /* dma */
188 ATA_CMD_READ,
189 ATA_CMD_WRITE,
190 ATA_CMD_READ_EXT,
191 ATA_CMD_WRITE_EXT,
192 0,
193 0,
194 0,
195 ATA_CMD_WRITE_FUA_EXT
196};
197
198/**
199 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
200 * @qc: command to examine and configure
201 *
202 * Examine the device configuration and tf->flags to calculate
203 * the proper read/write commands and protocol to use.
204 *
205 * LOCKING:
206 * caller.
207 */
208int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
209{
210 struct ata_taskfile *tf = &qc->tf;
211 struct ata_device *dev = qc->dev;
212 u8 cmd;
213
214 int index, fua, lba48, write;
215
216 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
217 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
218 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
219
220 if (dev->flags & ATA_DFLAG_PIO) {
221 tf->protocol = ATA_PROT_PIO;
222 index = dev->multi_count ? 0 : 8;
223 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
224 /* Unable to use DMA due to host limitation */
225 tf->protocol = ATA_PROT_PIO;
226 index = dev->multi_count ? 0 : 8;
227 } else {
228 tf->protocol = ATA_PROT_DMA;
229 index = 16;
230 }
231
232 cmd = ata_rw_cmds[index + fua + lba48 + write];
233 if (cmd) {
234 tf->command = cmd;
235 return 0;
236 }
237 return -1;
238}
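
/*
 * Worked example (editor's addition): the table is indexed by
 * base (0 = PIO multi, 8 = PIO, 16 = DMA) + fua (4) + lba48 (2) +
 * write (1). The zero entries are invalid combinations, e.g. FUA
 * without LBA48, and make ata_rwcmd_protocol() return -1.
 */
static u8 example_rw_cmd(void)
{
	/* LBA48 FUA DMA write: 16 + 4 + 2 + 1 == 23 */
	return ata_rw_cmds[16 + 4 + 2 + 1];	/* ATA_CMD_WRITE_FUA_EXT */
}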
239
240/**
241 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
242 * @pio_mask: pio_mask
243 * @mwdma_mask: mwdma_mask
244 * @udma_mask: udma_mask
245 *
246 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
247 * unsigned int xfer_mask.
248 *
249 * LOCKING:
250 * None.
251 *
252 * RETURNS:
253 * Packed xfer_mask.
254 */
255static unsigned int ata_pack_xfermask(unsigned int pio_mask,
256 unsigned int mwdma_mask,
257 unsigned int udma_mask)
258{
259 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
260 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
261 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
262}
263
264/**
265 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
266 * @xfer_mask: xfer_mask to unpack
267 * @pio_mask: resulting pio_mask
268 * @mwdma_mask: resulting mwdma_mask
269 * @udma_mask: resulting udma_mask
270 *
271 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
272 * Any NULL destination masks will be ignored.
273 */
274static void ata_unpack_xfermask(unsigned int xfer_mask,
275 unsigned int *pio_mask,
276 unsigned int *mwdma_mask,
277 unsigned int *udma_mask)
278{
279 if (pio_mask)
280 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
281 if (mwdma_mask)
282 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
283 if (udma_mask)
284 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
285}
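
/*
 * Round-trip sketch (editor's addition): packing PIO 0-4, MWDMA 0-2
 * and UDMA 0-5 and unpacking again yields the original masks.
 */
static void example_xfermask_roundtrip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f once more */
}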
286
287static const struct ata_xfer_ent {
288 int shift, bits;
289 u8 base;
290} ata_xfer_tbl[] = {
291 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
292 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
293 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
294 { -1, },
295};
296
297/**
298 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
299 * @xfer_mask: xfer_mask of interest
300 *
301 * Return matching XFER_* value for @xfer_mask. Only the highest
302 * bit of @xfer_mask is considered.
303 *
304 * LOCKING:
305 * None.
306 *
307 * RETURNS:
308 * Matching XFER_* value, 0 if no match found.
309 */
310static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
311{
312 int highbit = fls(xfer_mask) - 1;
313 const struct ata_xfer_ent *ent;
314
315 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
316 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
317 return ent->base + highbit - ent->shift;
318 return 0;
319}
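
/*
 * Worked example (editor's addition): with UDMA5 as the highest bit
 * set, the table walk above lands in the UDMA row and returns
 * XFER_UDMA_0 + 5, i.e. XFER_UDMA_5.
 */
static u8 example_mask2mode(void)
{
	return ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 5));
}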
320
321/**
322 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
323 * @xfer_mode: XFER_* of interest
324 *
325 * Return matching xfer_mask for @xfer_mode.
326 *
327 * LOCKING:
328 * None.
329 *
330 * RETURNS:
331 * Matching xfer_mask, 0 if no match found.
332 */
333static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
334{
335 const struct ata_xfer_ent *ent;
336
337 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
338 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
339 return 1 << (ent->shift + xfer_mode - ent->base);
340 return 0;
341}
342
343/**
344 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
345 * @xfer_mode: XFER_* of interest
346 *
347 * Return matching xfer_shift for @xfer_mode.
348 *
349 * LOCKING:
350 * None.
351 *
352 * RETURNS:
353 * Matching xfer_shift, -1 if no match found.
354 */
355static int ata_xfer_mode2shift(unsigned int xfer_mode)
356{
357 const struct ata_xfer_ent *ent;
358
359 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
360 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
361 return ent->shift;
362 return -1;
363}
364
365/**
366 * ata_mode_string - convert xfer_mask to string
367 * @xfer_mask: mask of bits supported; only highest bit counts.
368 *
369 * Determine string which represents the highest speed
370 * (highest bit in @xfer_mask).
371 *
372 * LOCKING:
373 * None.
374 *
375 * RETURNS:
376 * Constant C string representing highest speed listed in
377 * @xfer_mask, or the constant C string "<n/a>".
378 */
379static const char *ata_mode_string(unsigned int xfer_mask)
380{
381 static const char * const xfer_mode_str[] = {
382 "PIO0",
383 "PIO1",
384 "PIO2",
385 "PIO3",
386 "PIO4",
387 "MWDMA0",
388 "MWDMA1",
389 "MWDMA2",
390 "UDMA/16",
391 "UDMA/25",
392 "UDMA/33",
393 "UDMA/44",
394 "UDMA/66",
395 "UDMA/100",
396 "UDMA/133",
397 "UDMA7",
398 };
399 int highbit;
400
401 highbit = fls(xfer_mask) - 1;
402 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
403 return xfer_mode_str[highbit];
404 return "<n/a>";
405}
406
407static const char *sata_spd_string(unsigned int spd)
408{
409 static const char * const spd_str[] = {
410 "1.5 Gbps",
411 "3.0 Gbps",
412 };
413
414 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
415 return "<unknown>";
416 return spd_str[spd - 1];
417}
418
419void ata_dev_disable(struct ata_device *dev)
420{
421 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
422 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
423 dev->class++;
424 }
425}
426
427/**
428 * ata_pio_devchk - PATA device presence detection
429 * @ap: ATA channel to examine
430 * @device: Device to examine (starting at zero)
431 *
432 * This technique was originally described in
433 * Hale Landis's ATADRVR (www.ata-atapi.com), and
434 * later found its way into the ATA/ATAPI spec.
435 *
436 * Write a pattern to the ATA shadow registers,
437 * and if a device is present, it will respond by
438 * correctly storing and echoing back the
439 * ATA shadow register contents.
440 *
441 * LOCKING:
442 * caller.
443 */
444
445static unsigned int ata_pio_devchk(struct ata_port *ap,
446 unsigned int device)
447{
448 struct ata_ioports *ioaddr = &ap->ioaddr;
449 u8 nsect, lbal;
450
451 ap->ops->dev_select(ap, device);
452
453 outb(0x55, ioaddr->nsect_addr);
454 outb(0xaa, ioaddr->lbal_addr);
455
456 outb(0xaa, ioaddr->nsect_addr);
457 outb(0x55, ioaddr->lbal_addr);
458
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
461
462 nsect = inb(ioaddr->nsect_addr);
463 lbal = inb(ioaddr->lbal_addr);
464
465 if ((nsect == 0x55) && (lbal == 0xaa))
466 return 1; /* we found a device */
467
468 return 0; /* nothing found */
469}
470
471/**
472 * ata_mmio_devchk - PATA device presence detection
473 * @ap: ATA channel to examine
474 * @device: Device to examine (starting at zero)
475 *
476 * This technique was originally described in
477 * Hale Landis's ATADRVR (www.ata-atapi.com), and
478 * later found its way into the ATA/ATAPI spec.
479 *
480 * Write a pattern to the ATA shadow registers,
481 * and if a device is present, it will respond by
482 * correctly storing and echoing back the
483 * ATA shadow register contents.
484 *
485 * LOCKING:
486 * caller.
487 */
488
489static unsigned int ata_mmio_devchk(struct ata_port *ap,
490 unsigned int device)
491{
492 struct ata_ioports *ioaddr = &ap->ioaddr;
493 u8 nsect, lbal;
494
495 ap->ops->dev_select(ap, device);
496
497 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
498 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
499
500 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
501 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
502
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
505
506 nsect = readb((void __iomem *) ioaddr->nsect_addr);
507 lbal = readb((void __iomem *) ioaddr->lbal_addr);
508
509 if ((nsect == 0x55) && (lbal == 0xaa))
510 return 1; /* we found a device */
511
512 return 0; /* nothing found */
513}
514
515/**
516 * ata_devchk - PATA device presence detection
517 * @ap: ATA channel to examine
518 * @device: Device to examine (starting at zero)
519 *
520 * Dispatch ATA device presence detection, depending
521 * on whether we are using PIO or MMIO to talk to the
522 * ATA shadow registers.
523 *
524 * LOCKING:
525 * caller.
526 */
527
528static unsigned int ata_devchk(struct ata_port *ap,
529 unsigned int device)
530{
531 if (ap->flags & ATA_FLAG_MMIO)
532 return ata_mmio_devchk(ap, device);
533 return ata_pio_devchk(ap, device);
534}
535
536/**
537 * ata_dev_classify - determine device type based on ATA-spec signature
538 * @tf: ATA taskfile register set for device to be identified
539 *
540 * Determine from taskfile register contents whether a device is
541 * ATA or ATAPI, as per "Signature and persistence" section
542 * of ATA/PI spec (volume 1, sect 5.14).
543 *
544 * LOCKING:
545 * None.
546 *
547 * RETURNS:
548 * Device type: %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
549 * in the event of failure.
550 */
551
552unsigned int ata_dev_classify(const struct ata_taskfile *tf)
553{
554 /* Apple's open source Darwin code hints that some devices only
555 * put a proper signature into the LBA mid/high registers,
556 * so we only check those. It's sufficient for uniqueness.
557 */
558
559 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
560 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
561 DPRINTK("found ATA device by sig\n");
562 return ATA_DEV_ATA;
563 }
564
565 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
566 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
567 DPRINTK("found ATAPI device by sig\n");
568 return ATA_DEV_ATAPI;
569 }
570
571 DPRINTK("unknown device\n");
572 return ATA_DEV_UNKNOWN;
573}
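
/*
 * Worked example (editor's addition): an ATAPI device leaves
 * 0x14/0xeb in the LBA mid/high shadow registers after reset, so a
 * taskfile read back with those values classifies as ATA_DEV_ATAPI.
 */
static unsigned int example_classify_atapi(void)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));
	tf.lbam = 0x14;
	tf.lbah = 0xeb;
	return ata_dev_classify(&tf);	/* ATA_DEV_ATAPI */
}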
574
575/**
576 * ata_dev_try_classify - Parse returned ATA device signature
577 * @ap: ATA channel to examine
578 * @device: Device to examine (starting at zero)
579 * @r_err: Value of error register on completion
580 *
581 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
582 * an ATA/ATAPI-defined set of values is placed in the ATA
583 * shadow registers, indicating the results of device detection
584 * and diagnostics.
585 *
586 * Select the ATA device, and read the values from the ATA shadow
587 * registers. Then parse according to the Error register value,
588 * and the spec-defined values examined by ata_dev_classify().
589 *
590 * LOCKING:
591 * caller.
592 *
593 * RETURNS:
594 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
595 */
596
597static unsigned int
598ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
599{
600 struct ata_taskfile tf;
601 unsigned int class;
602 u8 err;
603
604 ap->ops->dev_select(ap, device);
605
606 memset(&tf, 0, sizeof(tf));
607
608 ap->ops->tf_read(ap, &tf);
609 err = tf.feature;
610 if (r_err)
611 *r_err = err;
612
613 /* see if device passed diags */
614 if (err == 1)
615 /* do nothing */ ;
616 else if ((device == 0) && (err == 0x81))
617 /* do nothing */ ;
618 else
619 return ATA_DEV_NONE;
620
621 /* determine if device is ATA or ATAPI */
622 class = ata_dev_classify(&tf);
623
624 if (class == ATA_DEV_UNKNOWN)
625 return ATA_DEV_NONE;
626 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
627 return ATA_DEV_NONE;
628 return class;
629}
630
631/**
632 * ata_id_string - Convert IDENTIFY DEVICE page into string
633 * @id: IDENTIFY DEVICE results we will examine
634 * @s: string into which data is output
635 * @ofs: offset into identify device page
636 * @len: length of string to return. must be an even number.
637 *
638 * The strings in the IDENTIFY DEVICE page are broken up into
639 * 16-bit chunks. Run through the string, and output each
640 * 8-bit chunk linearly, regardless of platform.
641 *
642 * LOCKING:
643 * caller.
644 */
645
646void ata_id_string(const u16 *id, unsigned char *s,
647 unsigned int ofs, unsigned int len)
648{
649 unsigned int c;
650
651 while (len > 0) {
652 c = id[ofs] >> 8;
653 *s = c;
654 s++;
655
656 c = id[ofs] & 0xff;
657 *s = c;
658 s++;
659
660 ofs++;
661 len -= 2;
662 }
663}
664
665/**
666 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
667 * @id: IDENTIFY DEVICE results we will examine
668 * @s: string into which data is output
669 * @ofs: offset into identify device page
670 * @len: length of string to return. must be an odd number.
671 *
672 * This function is identical to ata_id_string except that it
673 * trims trailing spaces and terminates the resulting string with
674 * a NUL. @len must be the actual maximum length (an even number) + 1.
675 *
676 * LOCKING:
677 * caller.
678 */
679void ata_id_c_string(const u16 *id, unsigned char *s,
680 unsigned int ofs, unsigned int len)
681{
682 unsigned char *p;
683
684 WARN_ON(!(len & 1));
685
686 ata_id_string(id, s, ofs, len - 1);
687
688 p = s + strnlen(s, len - 1);
689 while (p > s && p[-1] == ' ')
690 p--;
691 *p = '\0';
692}
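
/*
 * Usage sketch (editor's addition): the model number occupies
 * IDENTIFY words 27-46 (40 characters), so a NUL-terminated copy
 * needs a 41-byte buffer and the odd @len required above.
 */
static void example_read_model(const u16 *id, unsigned char *model)
{
	/* model must point at a buffer of at least 41 bytes */
	ata_id_c_string(id, model, 27, 41);
}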
693
694static u64 ata_id_n_sectors(const u16 *id)
695{
696 if (ata_id_has_lba(id)) {
697 if (ata_id_has_lba48(id))
698 return ata_id_u64(id, 100);
699 else
700 return ata_id_u32(id, 60);
701 } else {
702 if (ata_id_current_chs_valid(id))
703 return ata_id_u32(id, 57);
704 else
705 return id[1] * id[3] * id[6];
706 }
707}
708
709/**
710 * ata_noop_dev_select - Select device 0/1 on ATA bus
711 * @ap: ATA channel to manipulate
712 * @device: ATA device (numbered from zero) to select
713 *
714 * This function intentionally does nothing.
715 *
716 * May be used as the dev_select() entry in ata_port_operations.
717 *
718 * LOCKING:
719 * caller.
720 */
721void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
722{
723}
724
725
726/**
727 * ata_std_dev_select - Select device 0/1 on ATA bus
728 * @ap: ATA channel to manipulate
729 * @device: ATA device (numbered from zero) to select
730 *
731 * Use the method defined in the ATA specification to
732 * make either device 0, or device 1, active on the
733 * ATA channel. Works with both PIO and MMIO.
734 *
735 * May be used as the dev_select() entry in ata_port_operations.
736 *
737 * LOCKING:
738 * caller.
739 */
740
741void ata_std_dev_select (struct ata_port *ap, unsigned int device)
742{
743 u8 tmp;
744
745 if (device == 0)
746 tmp = ATA_DEVICE_OBS;
747 else
748 tmp = ATA_DEVICE_OBS | ATA_DEV1;
749
750 if (ap->flags & ATA_FLAG_MMIO) {
751 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
752 } else {
753 outb(tmp, ap->ioaddr.device_addr);
754 }
755 ata_pause(ap); /* needed; also flushes, for mmio */
756}
757
758/**
759 * ata_dev_select - Select device 0/1 on ATA bus
760 * @ap: ATA channel to manipulate
761 * @device: ATA device (numbered from zero) to select
762 * @wait: non-zero to wait for Status register BSY bit to clear
763 * @can_sleep: non-zero if context allows sleeping
764 *
765 * Use the method defined in the ATA specification to
766 * make either device 0, or device 1, active on the
767 * ATA channel.
768 *
769 * This is a high-level version of ata_std_dev_select(),
770 * which additionally provides the services of inserting
771 * the proper pauses and status polling, where needed.
772 *
773 * LOCKING:
774 * caller.
775 */
776
777void ata_dev_select(struct ata_port *ap, unsigned int device,
778 unsigned int wait, unsigned int can_sleep)
779{
780 if (ata_msg_probe(ap)) {
781 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
782 "device %u, wait %u\n",
783 ap->id, device, wait);
784 }
785
786 if (wait)
787 ata_wait_idle(ap);
788
789 ap->ops->dev_select(ap, device);
790
791 if (wait) {
792 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
793 msleep(150);
794 ata_wait_idle(ap);
795 }
796}
797
798/**
799 * ata_dump_id - IDENTIFY DEVICE info debugging output
800 * @id: IDENTIFY DEVICE page to dump
801 *
802 * Dump selected 16-bit words from the given IDENTIFY DEVICE
803 * page.
804 *
805 * LOCKING:
806 * caller.
807 */
808
809static inline void ata_dump_id(const u16 *id)
810{
811 DPRINTK("49==0x%04x "
812 "53==0x%04x "
813 "63==0x%04x "
814 "64==0x%04x "
815 "75==0x%04x \n",
816 id[49],
817 id[53],
818 id[63],
819 id[64],
820 id[75]);
821 DPRINTK("80==0x%04x "
822 "81==0x%04x "
823 "82==0x%04x "
824 "83==0x%04x "
825 "84==0x%04x \n",
826 id[80],
827 id[81],
828 id[82],
829 id[83],
830 id[84]);
831 DPRINTK("88==0x%04x "
832 "93==0x%04x\n",
833 id[88],
834 id[93]);
835}
836
837/**
838 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
839 * @id: IDENTIFY data to compute xfer mask from
840 *
841 * Compute the xfermask for this device. This is not as trivial
842 * as it seems if we must consider early devices correctly.
843 *
844 * FIXME: pre-IDE drive timing (do we care?).
845 *
846 * LOCKING:
847 * None.
848 *
849 * RETURNS:
850 * Computed xfermask
851 */
852static unsigned int ata_id_xfermask(const u16 *id)
853{
854 unsigned int pio_mask, mwdma_mask, udma_mask;
855
856 /* Usual case. Word 53 indicates word 64 is valid */
857 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
858 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
859 pio_mask <<= 3;
860 pio_mask |= 0x7;
861 } else {
862 /* If word 64 isn't valid then Word 51 high byte holds
863 * the PIO timing number for the maximum. Turn it into
864 * a mask.
865 */
866 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
867
868 /* But wait.. there's more. Design your standards by
869 * committee and you too can get a free iordy field to
870 * process. However, it's the speeds, not the modes, that
871 * are supported... Note that drivers using the timing API
872 * will get this right anyway.
873 */
874 }
875
876 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
877
878 udma_mask = 0;
879 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
880 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
881
882 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
883}
884
885/**
886 * ata_port_queue_task - Queue port_task
887 * @ap: The ata_port to queue port_task for
888 * @fn: workqueue function to be scheduled
889 * @data: data value to pass to workqueue function
890 * @delay: delay time for workqueue function
891 *
892 * Schedule @fn(@data) for execution after @delay jiffies using
893 * port_task. There is one port_task per port and it's the
894 * user's (the low-level driver's) responsibility to make sure that only
895 * one task is active at any given time.
896 *
897 * libata core layer takes care of synchronization between
898 * port_task and EH. ata_port_queue_task() may be ignored for EH
899 * synchronization.
900 *
901 * LOCKING:
902 * Inherited from caller.
903 */
904void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
905 unsigned long delay)
906{
907 int rc;
908
909 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
910 return;
911
912 PREPARE_WORK(&ap->port_task, fn, data);
913
914 if (!delay)
915 rc = queue_work(ata_wq, &ap->port_task);
916 else
917 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
918
919 /* rc == 0 means that another user is using port task */
920 WARN_ON(rc == 0);
921}
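
/*
 * Usage sketch (editor's addition, hypothetical): an LLD polling for
 * completion can keep re-queueing its own handler; only one such task
 * may be active per port, as noted above.
 */
static void example_poll_task(void *data)
{
	struct ata_port *ap = data;

	/* ... poll the hardware; if not finished, run again in ~10ms ... */
	ata_port_queue_task(ap, example_poll_task, ap, msecs_to_jiffies(10));
}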
922
923/**
924 * ata_port_flush_task - Flush port_task
925 * @ap: The ata_port to flush port_task for
926 *
927 * After this function completes, port_task is guaranteed not to
928 * be running or scheduled.
929 *
930 * LOCKING:
931 * Kernel thread context (may sleep)
932 */
933void ata_port_flush_task(struct ata_port *ap)
934{
935 unsigned long flags;
936
937 DPRINTK("ENTER\n");
938
939 spin_lock_irqsave(ap->lock, flags);
940 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
941 spin_unlock_irqrestore(ap->lock, flags);
942
943 DPRINTK("flush #1\n");
944 flush_workqueue(ata_wq);
945
946 /*
947 * At this point, if a task is running, it's guaranteed to see
948 * the FLUSH flag; thus, it will never queue pio tasks again.
949 * Cancel and flush.
950 */
951 if (!cancel_delayed_work(&ap->port_task)) {
952 if (ata_msg_ctl(ap))
953 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
954 flush_workqueue(ata_wq);
955 }
956
957 spin_lock_irqsave(ap->lock, flags);
958 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
959 spin_unlock_irqrestore(ap->lock, flags);
960
961 if (ata_msg_ctl(ap))
962 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
963}
964
965void ata_qc_complete_internal(struct ata_queued_cmd *qc)
966{
967 struct completion *waiting = qc->private_data;
968
969 complete(waiting);
970}
971
972/**
973 * ata_exec_internal - execute libata internal command
974 * @dev: Device to which the command is sent
975 * @tf: Taskfile registers for the command and the result
976 * @cdb: CDB for packet command
977 * @dma_dir: Data transfer direction of the command
978 * @buf: Data buffer of the command
979 * @buflen: Length of data buffer
980 *
981 * Executes libata internal command with timeout. @tf contains
982 * command on entry and result on return. Timeout and error
983 * conditions are reported via return value. No recovery action
984 * is taken after a command times out. It is the caller's duty to
985 * clean up after timeout.
986 *
987 * LOCKING:
988 * None. Should be called with kernel context, might sleep.
989 *
990 * RETURNS:
991 * Zero on success, AC_ERR_* mask on failure
992 */
993unsigned ata_exec_internal(struct ata_device *dev,
994 struct ata_taskfile *tf, const u8 *cdb,
995 int dma_dir, void *buf, unsigned int buflen)
996{
997 struct ata_port *ap = dev->ap;
998 u8 command = tf->command;
999 struct ata_queued_cmd *qc;
1000 unsigned int tag, preempted_tag;
1001 u32 preempted_sactive, preempted_qc_active;
1002 DECLARE_COMPLETION(wait);
1003 unsigned long flags;
1004 unsigned int err_mask;
1005 int rc;
1006
1007 spin_lock_irqsave(ap->lock, flags);
1008
1009 /* no internal command while frozen */
1010 if (ap->flags & ATA_FLAG_FROZEN) {
1011 spin_unlock_irqrestore(ap->lock, flags);
1012 return AC_ERR_SYSTEM;
1013 }
1014
1015 /* initialize internal qc */
1016
1017 /* XXX: Tag 0 is used for drivers with legacy EH as some
1018 * drivers choke if any other tag is given. This breaks
1019 * ata_tag_internal() test for those drivers. Don't use new
1020 * EH stuff without converting to it.
1021 */
1022 if (ap->ops->error_handler)
1023 tag = ATA_TAG_INTERNAL;
1024 else
1025 tag = 0;
1026
1027 if (test_and_set_bit(tag, &ap->qc_allocated))
1028 BUG();
1029 qc = __ata_qc_from_tag(ap, tag);
1030
1031 qc->tag = tag;
1032 qc->scsicmd = NULL;
1033 qc->ap = ap;
1034 qc->dev = dev;
1035 ata_qc_reinit(qc);
1036
1037 preempted_tag = ap->active_tag;
1038 preempted_sactive = ap->sactive;
1039 preempted_qc_active = ap->qc_active;
1040 ap->active_tag = ATA_TAG_POISON;
1041 ap->sactive = 0;
1042 ap->qc_active = 0;
1043
1044 /* prepare & issue qc */
1045 qc->tf = *tf;
1046 if (cdb)
1047 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1048 qc->flags |= ATA_QCFLAG_RESULT_TF;
1049 qc->dma_dir = dma_dir;
1050 if (dma_dir != DMA_NONE) {
1051 ata_sg_init_one(qc, buf, buflen);
1052 qc->nsect = buflen / ATA_SECT_SIZE;
1053 }
1054
1055 qc->private_data = &wait;
1056 qc->complete_fn = ata_qc_complete_internal;
1057
1058 ata_qc_issue(qc);
1059
1060 spin_unlock_irqrestore(ap->lock, flags);
1061
1062 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
1063
1064 ata_port_flush_task(ap);
1065
1066 if (!rc) {
1067 spin_lock_irqsave(ap->lock, flags);
1068
1069 /* We're racing with irq here. If we lose, the
1070 * following test prevents us from completing the qc
1071 * twice. If we win, the port is frozen and will be
1072 * cleaned up by ->post_internal_cmd().
1073 */
1074 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1075 qc->err_mask |= AC_ERR_TIMEOUT;
1076
1077 if (ap->ops->error_handler)
1078 ata_port_freeze(ap);
1079 else
1080 ata_qc_complete(qc);
1081
1082 if (ata_msg_warn(ap))
1083 ata_dev_printk(dev, KERN_WARNING,
1084 "qc timeout (cmd 0x%x)\n", command);
1085 }
1086
1087 spin_unlock_irqrestore(ap->lock, flags);
1088 }
1089
1090 /* do post_internal_cmd */
1091 if (ap->ops->post_internal_cmd)
1092 ap->ops->post_internal_cmd(qc);
1093
1094 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1095 if (ata_msg_warn(ap))
1096 ata_dev_printk(dev, KERN_WARNING,
1097 "zero err_mask for failed "
1098 "internal command, assuming AC_ERR_OTHER\n");
1099 qc->err_mask |= AC_ERR_OTHER;
1100 }
1101
1102 /* finish up */
1103 spin_lock_irqsave(ap->lock, flags);
1104
1105 *tf = qc->result_tf;
1106 err_mask = qc->err_mask;
1107
1108 ata_qc_free(qc);
1109 ap->active_tag = preempted_tag;
1110 ap->sactive = preempted_sactive;
1111 ap->qc_active = preempted_qc_active;
1112
1113 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1114 * Until those drivers are fixed, we detect the condition
1115 * here, fail the command with AC_ERR_SYSTEM and reenable the
1116 * port.
1117 *
1118 * Note that this doesn't change any behavior as internal
1119 * command failure results in disabling the device in the
1120 * higher layer for LLDDs without new reset/EH callbacks.
1121 *
1122 * Kill the following code as soon as those drivers are fixed.
1123 */
1124 if (ap->flags & ATA_FLAG_DISABLED) {
1125 err_mask |= AC_ERR_SYSTEM;
1126 ata_port_probe(ap);
1127 }
1128
1129 spin_unlock_irqrestore(ap->lock, flags);
1130
1131 return err_mask;
1132}
1133
1134/**
1135 * ata_pio_need_iordy - check if iordy needed
1136 * @adev: ATA device
1137 *
1138 * Check if the current speed of the device requires IORDY. Used
1139 * by various controllers for chip configuration.
1140 */
1141
1142unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1143{
1144 int pio;
1145 int speed = adev->pio_mode - XFER_PIO_0;
1146
1147 if (speed < 2)
1148 return 0;
1149 if (speed > 2)
1150 return 1;
1151
1152 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1153
1154 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1155 pio = adev->id[ATA_ID_EIDE_PIO];
1156 /* Is the speed faster than the drive allows without IORDY? */
1157 if (pio) {
1158 /* This is cycle times not frequency - watch the logic! */
1159 if (pio > 240) /* PIO2 is 240nS per cycle */
1160 return 1;
1161 return 0;
1162 }
1163 }
1164 return 0;
1165}
1166
1167/**
1168 * ata_dev_read_id - Read ID data from the specified device
1169 * @dev: target device
1170 * @p_class: pointer to class of the target device (may be changed)
1171 * @post_reset: is this read ID post-reset?
1172 * @id: buffer to read IDENTIFY data into
1173 *
1174 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1175 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1176 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1177 * for pre-ATA4 drives.
1178 *
1179 * LOCKING:
1180 * Kernel thread context (may sleep)
1181 *
1182 * RETURNS:
1183 * 0 on success, -errno otherwise.
1184 */
1185int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1186 int post_reset, u16 *id)
1187{
1188 struct ata_port *ap = dev->ap;
1189 unsigned int class = *p_class;
1190 struct ata_taskfile tf;
1191 unsigned int err_mask = 0;
1192 const char *reason;
1193 int rc;
1194
1195 if (ata_msg_ctl(ap))
1196 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1197 __FUNCTION__, ap->id, dev->devno);
1198
1199 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1200
1201 retry:
1202 ata_tf_init(dev, &tf);
1203
1204 switch (class) {
1205 case ATA_DEV_ATA:
1206 tf.command = ATA_CMD_ID_ATA;
1207 break;
1208 case ATA_DEV_ATAPI:
1209 tf.command = ATA_CMD_ID_ATAPI;
1210 break;
1211 default:
1212 rc = -ENODEV;
1213 reason = "unsupported class";
1214 goto err_out;
1215 }
1216
1217 tf.protocol = ATA_PROT_PIO;
1218
1219 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1220 id, sizeof(id[0]) * ATA_ID_WORDS);
1221 if (err_mask) {
1222 rc = -EIO;
1223 reason = "I/O error";
1224 goto err_out;
1225 }
1226
1227 swap_buf_le16(id, ATA_ID_WORDS);
1228
1229 /* sanity check */
1230 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1231 rc = -EINVAL;
1232 reason = "device reports illegal type";
1233 goto err_out;
1234 }
1235
1236 if (post_reset && class == ATA_DEV_ATA) {
1237 /*
1238 * The exact sequence expected by certain pre-ATA4 drives is:
1239 * SRST RESET
1240 * IDENTIFY
1241 * INITIALIZE DEVICE PARAMETERS
1242 * anything else..
1243 * Some drives were very specific about that exact sequence.
1244 */
1245 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1246 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1247 if (err_mask) {
1248 rc = -EIO;
1249 reason = "INIT_DEV_PARAMS failed";
1250 goto err_out;
1251 }
1252
1253 /* current CHS translation info (id[53-58]) might be
1254 * changed. reread the identify device info.
1255 */
1256 post_reset = 0;
1257 goto retry;
1258 }
1259 }
1260
1261 *p_class = class;
1262
1263 return 0;
1264
1265 err_out:
1266 if (ata_msg_warn(ap))
1267 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1268 "(%s, err_mask=0x%x)\n", reason, err_mask);
1269 return rc;
1270}
1271
1272static inline u8 ata_dev_knobble(struct ata_device *dev)
1273{
1274 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1275}
1276
1277static void ata_dev_config_ncq(struct ata_device *dev,
1278 char *desc, size_t desc_sz)
1279{
1280 struct ata_port *ap = dev->ap;
1281 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1282
1283 if (!ata_id_has_ncq(dev->id)) {
1284 desc[0] = '\0';
1285 return;
1286 }
1287
1288 if (ap->flags & ATA_FLAG_NCQ) {
1289 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1290 dev->flags |= ATA_DFLAG_NCQ;
1291 }
1292
1293 if (hdepth >= ddepth)
1294 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1295 else
1296 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1297}
1298
1299/**
1300 * ata_dev_configure - Configure the specified ATA/ATAPI device
1301 * @dev: Target device to configure
1302 * @print_info: Enable device info printout
1303 *
1304 * Configure @dev according to @dev->id. Generic and low-level
1305 * driver specific fixups are also applied.
1306 *
1307 * LOCKING:
1308 * Kernel thread context (may sleep)
1309 *
1310 * RETURNS:
1311 * 0 on success, -errno otherwise
1312 */
1313int ata_dev_configure(struct ata_device *dev, int print_info)
1314{
1315 struct ata_port *ap = dev->ap;
1316 const u16 *id = dev->id;
1317 unsigned int xfer_mask;
1318 int i, rc;
1319
1320 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1321 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1322 __FUNCTION__, ap->id, dev->devno);
1323 return 0;
1324 }
1325
1326 if (ata_msg_probe(ap))
1327 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1328 __FUNCTION__, ap->id, dev->devno);
1329
1330 /* print device capabilities */
1331 if (ata_msg_probe(ap))
1332 ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
1333 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1334 __FUNCTION__,
1335 id[49], id[82], id[83], id[84],
1336 id[85], id[86], id[87], id[88]);
1337
1338 /* initialize to-be-configured parameters */
1339 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1340 dev->max_sectors = 0;
1341 dev->cdb_len = 0;
1342 dev->n_sectors = 0;
1343 dev->cylinders = 0;
1344 dev->heads = 0;
1345 dev->sectors = 0;
1346
1347 /*
1348 * common ATA, ATAPI feature tests
1349 */
1350
1351 /* find max transfer mode; for printk only */
1352 xfer_mask = ata_id_xfermask(id);
1353
1354 if (ata_msg_probe(ap))
1355 ata_dump_id(id);
1356
1357 /* ATA-specific feature tests */
1358 if (dev->class == ATA_DEV_ATA) {
1359 dev->n_sectors = ata_id_n_sectors(id);
1360
1361 if (ata_id_has_lba(id)) {
1362 const char *lba_desc;
1363 char ncq_desc[20];
1364
1365 lba_desc = "LBA";
1366 dev->flags |= ATA_DFLAG_LBA;
1367 if (ata_id_has_lba48(id)) {
1368 dev->flags |= ATA_DFLAG_LBA48;
1369 lba_desc = "LBA48";
1370 }
1371
1372 /* config NCQ */
1373 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1374
1375 /* print device info to dmesg */
1376 if (ata_msg_info(ap))
1377 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1378 "max %s, %Lu sectors: %s %s\n",
1379 ata_id_major_version(id),
1380 ata_mode_string(xfer_mask),
1381 (unsigned long long)dev->n_sectors,
1382 lba_desc, ncq_desc);
1383 } else {
1384 /* CHS */
1385
1386 /* Default translation */
1387 dev->cylinders = id[1];
1388 dev->heads = id[3];
1389 dev->sectors = id[6];
1390
1391 if (ata_id_current_chs_valid(id)) {
1392 /* Current CHS translation is valid. */
1393 dev->cylinders = id[54];
1394 dev->heads = id[55];
1395 dev->sectors = id[56];
1396 }
1397
1398 /* print device info to dmesg */
1399 if (ata_msg_info(ap))
1400 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1401 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1402 ata_id_major_version(id),
1403 ata_mode_string(xfer_mask),
1404 (unsigned long long)dev->n_sectors,
1405 dev->cylinders, dev->heads, dev->sectors);
1406 }
1407
1408 if (dev->id[59] & 0x100) {
1409 dev->multi_count = dev->id[59] & 0xff;
1410 if (ata_msg_info(ap))
1411 ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
1412 ap->id, dev->devno, dev->multi_count);
1413 }
1414
1415 dev->cdb_len = 16;
1416 }
1417
1418 /* ATAPI-specific feature tests */
1419 else if (dev->class == ATA_DEV_ATAPI) {
1420 char *cdb_intr_string = "";
1421
1422 rc = atapi_cdb_len(id);
1423 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1424 if (ata_msg_warn(ap))
1425 ata_dev_printk(dev, KERN_WARNING,
1426 "unsupported CDB len\n");
1427 rc = -EINVAL;
1428 goto err_out_nosup;
1429 }
1430 dev->cdb_len = (unsigned int) rc;
1431
1432 if (ata_id_cdb_intr(dev->id)) {
1433 dev->flags |= ATA_DFLAG_CDB_INTR;
1434 cdb_intr_string = ", CDB intr";
1435 }
1436
1437 /* print device info to dmesg */
1438 if (ata_msg_info(ap))
1439 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1440 ata_mode_string(xfer_mask),
1441 cdb_intr_string);
1442 }
1443
1444 ap->host->max_cmd_len = 0;
1445 for (i = 0; i < ATA_MAX_DEVICES; i++)
1446 ap->host->max_cmd_len = max_t(unsigned int,
1447 ap->host->max_cmd_len,
1448 ap->device[i].cdb_len);
1449
1450 /* limit bridge transfers to udma5, 200 sectors */
1451 if (ata_dev_knobble(dev)) {
1452 if (ata_msg_info(ap))
1453 ata_dev_printk(dev, KERN_INFO,
1454 "applying bridge limits\n");
1455 dev->udma_mask &= ATA_UDMA5;
1456 dev->max_sectors = ATA_MAX_SECTORS;
1457 }
1458
1459 if (ap->ops->dev_config)
1460 ap->ops->dev_config(ap, dev);
1461
1462 if (ata_msg_probe(ap))
1463 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1464 __FUNCTION__, ata_chk_status(ap));
1465 return 0;
1466
1467err_out_nosup:
1468 if (ata_msg_probe(ap))
1469 ata_dev_printk(dev, KERN_DEBUG,
1470 "%s: EXIT, err\n", __FUNCTION__);
1471 return rc;
1472}
1473
1474/**
1475 * ata_bus_probe - Reset and probe ATA bus
1476 * @ap: Bus to probe
1477 *
1478 * Master ATA bus probing function. Initiates a hardware-dependent
1479 * bus reset, then attempts to identify any devices found on
1480 * the bus.
1481 *
1482 * LOCKING:
1483 * PCI/etc. bus probe sem.
1484 *
1485 * RETURNS:
1486 * Zero on success, negative errno otherwise.
1487 */
1488
1489static int ata_bus_probe(struct ata_port *ap)
1490{
1491 unsigned int classes[ATA_MAX_DEVICES];
1492 int tries[ATA_MAX_DEVICES];
1493 int i, rc, down_xfermask;
1494 struct ata_device *dev;
1495
1496 ata_port_probe(ap);
1497
1498 for (i = 0; i < ATA_MAX_DEVICES; i++)
1499 tries[i] = ATA_PROBE_MAX_TRIES;
1500
1501 retry:
1502 down_xfermask = 0;
1503
1504 /* reset and determine device classes */
1505 ap->ops->phy_reset(ap);
1506
1507 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1508 dev = &ap->device[i];
1509
1510 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1511 dev->class != ATA_DEV_UNKNOWN)
1512 classes[dev->devno] = dev->class;
1513 else
1514 classes[dev->devno] = ATA_DEV_NONE;
1515
1516 dev->class = ATA_DEV_UNKNOWN;
1517 }
1518
1519 ata_port_probe(ap);
1520
1521 /* After the reset, the device state is PIO 0 and the controller
1522 state is undefined. Record the mode. */
1523
1524 for (i = 0; i < ATA_MAX_DEVICES; i++)
1525 ap->device[i].pio_mode = XFER_PIO_0;
1526
1527 /* read IDENTIFY page and configure devices */
1528 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1529 dev = &ap->device[i];
1530
1531 if (tries[i])
1532 dev->class = classes[i];
1533
1534 if (!ata_dev_enabled(dev))
1535 continue;
1536
1537 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1538 if (rc)
1539 goto fail;
1540
1541 rc = ata_dev_configure(dev, 1);
1542 if (rc)
1543 goto fail;
1544 }
1545
1546 /* configure transfer mode */
1547 rc = ata_set_mode(ap, &dev);
1548 if (rc) {
1549 down_xfermask = 1;
1550 goto fail;
1551 }
1552
1553 for (i = 0; i < ATA_MAX_DEVICES; i++)
1554 if (ata_dev_enabled(&ap->device[i]))
1555 return 0;
1556
1557 /* no device present, disable port */
1558 ata_port_disable(ap);
1559 ap->ops->port_disable(ap);
1560 return -ENODEV;
1561
1562 fail:
1563 switch (rc) {
1564 case -EINVAL:
1565 case -ENODEV:
1566 tries[dev->devno] = 0;
1567 break;
1568 case -EIO:
1569 sata_down_spd_limit(ap);
1570 /* fall through */
1571 default:
1572 tries[dev->devno]--;
1573 if (down_xfermask &&
1574 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1575 tries[dev->devno] = 0;
1576 }
1577
1578 if (!tries[dev->devno]) {
1579 ata_down_xfermask_limit(dev, 1);
1580 ata_dev_disable(dev);
1581 }
1582
1583 goto retry;
1584}
1585
1586/**
1587 * ata_port_probe - Mark port as enabled
1588 * @ap: Port for which we indicate enablement
1589 *
1590 * Modify @ap data structure such that the system
1591 * thinks that the entire port is enabled.
1592 *
1593 * LOCKING: host_set lock, or some other form of
1594 * serialization.
1595 */
1596
1597void ata_port_probe(struct ata_port *ap)
1598{
1599 ap->flags &= ~ATA_FLAG_DISABLED;
1600}
1601
1602/**
1603 * sata_print_link_status - Print SATA link status
1604 * @ap: SATA port to printk link status about
1605 *
1606 * This function prints link speed and status of a SATA link.
1607 *
1608 * LOCKING:
1609 * None.
1610 */
1611static void sata_print_link_status(struct ata_port *ap)
1612{
1613 u32 sstatus, scontrol, tmp;
1614
1615 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1616 return;
1617 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1618
1619 if (ata_port_online(ap)) {
1620 tmp = (sstatus >> 4) & 0xf;
1621 ata_port_printk(ap, KERN_INFO,
1622 "SATA link up %s (SStatus %X SControl %X)\n",
1623 sata_spd_string(tmp), sstatus, scontrol);
1624 } else {
1625 ata_port_printk(ap, KERN_INFO,
1626 "SATA link down (SStatus %X SControl %X)\n",
1627 sstatus, scontrol);
1628 }
1629}
1630
1631/**
1632 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1633 * @ap: SATA port associated with target SATA PHY.
1634 *
1635 * This function issues commands to standard SATA Sxxx
1636 * PHY registers, to wake up the phy (and device), and
1637 * clear any reset condition.
1638 *
1639 * LOCKING:
1640 * PCI/etc. bus probe sem.
1641 *
1642 */
1643void __sata_phy_reset(struct ata_port *ap)
1644{
1645 u32 sstatus;
1646 unsigned long timeout = jiffies + (HZ * 5);
1647
1648 if (ap->flags & ATA_FLAG_SATA_RESET) {
1649 /* issue phy wake/reset */
1650 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1651 /* Couldn't find anything in SATA I/II specs, but
1652 * AHCI-1.1 10.4.2 says at least 1 ms. */
1653 mdelay(1);
1654 }
1655 /* phy wake/clear reset */
1656 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1657
1658 /* wait for phy to become ready, if necessary */
1659 do {
1660 msleep(200);
1661 sata_scr_read(ap, SCR_STATUS, &sstatus);
1662 if ((sstatus & 0xf) != 1)
1663 break;
1664 } while (time_before(jiffies, timeout));
1665
1666 /* print link status */
1667 sata_print_link_status(ap);
1668
1669 /* TODO: phy layer with polling, timeouts, etc. */
1670 if (!ata_port_offline(ap))
1671 ata_port_probe(ap);
1672 else
1673 ata_port_disable(ap);
1674
1675 if (ap->flags & ATA_FLAG_DISABLED)
1676 return;
1677
1678 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1679 ata_port_disable(ap);
1680 return;
1681 }
1682
1683 ap->cbl = ATA_CBL_SATA;
1684}
1685
1686/**
1687 * sata_phy_reset - Reset SATA bus.
1688 * @ap: SATA port associated with target SATA PHY.
1689 *
1690 * This function resets the SATA bus, and then probes
1691 * the bus for devices.
1692 *
1693 * LOCKING:
1694 * PCI/etc. bus probe sem.
1695 *
1696 */
1697void sata_phy_reset(struct ata_port *ap)
1698{
1699 __sata_phy_reset(ap);
1700 if (ap->flags & ATA_FLAG_DISABLED)
1701 return;
1702 ata_bus_reset(ap);
1703}
1704
1705/**
1706 * ata_dev_pair - return other device on cable
1707 * @adev: device
1708 *
1709 * Obtain the other device on the same cable, or NULL if none
1710 * is present.
1711 */
1712
1713struct ata_device *ata_dev_pair(struct ata_device *adev)
1714{
1715 struct ata_port *ap = adev->ap;
1716 struct ata_device *pair = &ap->device[1 - adev->devno];
1717 if (!ata_dev_enabled(pair))
1718 return NULL;
1719 return pair;
1720}
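
/* Illustrative usage (a sketch, not taken from this file): from the
 * master device on a channel, peek at its slave partner, if enabled:
 *
 *	struct ata_device *slave = ata_dev_pair(&ap->device[0]);
 *	if (slave)
 *		ata_dev_printk(slave, KERN_INFO, "slave is present\n");
 */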
1721
1722/**
1723 * ata_port_disable - Disable port.
1724 * @ap: Port to be disabled.
1725 *
1726 * Modify @ap data structure such that the system
1727 * thinks that the entire port is disabled, and should
1728 * never attempt to probe or communicate with devices
1729 * on this port.
1730 *
1731 * LOCKING: host_set lock, or some other form of
1732 * serialization.
1733 */
1734
1735void ata_port_disable(struct ata_port *ap)
1736{
1737 ap->device[0].class = ATA_DEV_NONE;
1738 ap->device[1].class = ATA_DEV_NONE;
1739 ap->flags |= ATA_FLAG_DISABLED;
1740}
1741
1742/**
1743 * sata_down_spd_limit - adjust SATA spd limit downward
1744 * @ap: Port to adjust SATA spd limit for
1745 *
1746 * Adjust SATA spd limit of @ap downward. Note that this
1747 * function only adjusts the limit. The change must be applied
1748 * using sata_set_spd().
1749 *
1750 * LOCKING:
1751 * Inherited from caller.
1752 *
1753 * RETURNS:
1754 * 0 on success, negative errno on failure
1755 */
1756int sata_down_spd_limit(struct ata_port *ap)
1757{
1758 u32 sstatus, spd, mask;
1759 int rc, highbit;
1760
1761 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1762 if (rc)
1763 return rc;
1764
1765 mask = ap->sata_spd_limit;
1766 if (mask <= 1)
1767 return -EINVAL;
1768 highbit = fls(mask) - 1;
1769 mask &= ~(1 << highbit);
1770
1771 spd = (sstatus >> 4) & 0xf;
1772 if (spd <= 1)
1773 return -EINVAL;
1774 spd--;
1775 mask &= (1 << spd) - 1;
1776 if (!mask)
1777 return -EINVAL;
1778
1779 ap->sata_spd_limit = mask;
1780
1781 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1782 sata_spd_string(fls(mask)));
1783
1784 return 0;
1785}
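
/* Worked example (illustrative): suppose ap->sata_spd_limit is 0x7
 * (gen1-3 allowed) and SStatus reports the link running at spd 2
 * (3.0 Gbps):
 *
 *	highbit = fls(0x7) - 1;		// 2
 *	mask    = 0x7 & ~(1 << 2);	// 0x3
 *	spd     = 2; spd--;		// 1
 *	mask   &= (1 << 1) - 1;		// 0x1: gen1 (1.5 Gbps) only
 *
 * so the next hardreset renegotiates the link one generation down.
 */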
1786
1787static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1788{
1789 u32 spd, limit;
1790
1791 if (ap->sata_spd_limit == UINT_MAX)
1792 limit = 0;
1793 else
1794 limit = fls(ap->sata_spd_limit);
1795
1796 spd = (*scontrol >> 4) & 0xf;
1797 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1798
1799 return spd != limit;
1800}
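
/* Illustrative: with sata_spd_limit == 0x3 (gen1-2 allowed),
 * fls(0x3) == 2, so bits 7:4 of *scontrol are rewritten to 2; the
 * function returns nonzero only when that differs from the SPD value
 * previously programmed in SControl.
 */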
1801
1802/**
1803 * sata_set_spd_needed - is SATA spd configuration needed
1804 * @ap: Port in question
1805 *
1806 * Test whether the spd limit in SControl matches
1807 * @ap->sata_spd_limit. This function is used to determine
1808 * whether hardreset is necessary to apply SATA spd
1809 * configuration.
1810 *
1811 * LOCKING:
1812 * Inherited from caller.
1813 *
1814 * RETURNS:
1815 * 1 if SATA spd configuration is needed, 0 otherwise.
1816 */
1817int sata_set_spd_needed(struct ata_port *ap)
1818{
1819 u32 scontrol;
1820
1821 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1822 return 0;
1823
1824 return __sata_set_spd_needed(ap, &scontrol);
1825}
1826
1827/**
1828 * sata_set_spd - set SATA spd according to spd limit
1829 * @ap: Port to set SATA spd for
1830 *
1831 * Set SATA spd of @ap according to sata_spd_limit.
1832 *
1833 * LOCKING:
1834 * Inherited from caller.
1835 *
1836 * RETURNS:
1837 * 0 if spd doesn't need to be changed, 1 if spd has been
1838 * changed. Negative errno if SCR registers are inaccessible.
1839 */
1840int sata_set_spd(struct ata_port *ap)
1841{
1842 u32 scontrol;
1843 int rc;
1844
1845 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1846 return rc;
1847
1848 if (!__sata_set_spd_needed(ap, &scontrol))
1849 return 0;
1850
1851 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1852 return rc;
1853
1854 return 1;
1855}
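
/* Typical call pattern (a sketch, not lifted from this file): apply
 * the limit and, when SControl actually changed, hardreset so the
 * link renegotiates at the newly programmed speed:
 *
 *	if (sata_set_spd(ap) == 1)
 *		rc = sata_std_hardreset(ap, &class);
 */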
1856
1857/*
1858 * This mode timing computation functionality is ported over from
1859 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1860 */
1861/*
1862 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1863 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1864 * for PIO 5, which is a nonstandard extension and UDMA6, which
1865 * is currently supported only by Maxtor drives.
1866 */
1867
1868static const struct ata_timing ata_timing[] = {
1869
1870 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1871 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1872 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1873 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1874
1875 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1876 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1877 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1878
1879/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1880
1881 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1882 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1883 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1884
1885 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1886 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1887 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1888
1889/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1890 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1891 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1892
1893 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1894 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1895 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1896
1897/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1898
1899 { 0xFF }
1900};
1901
1902#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1903#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
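
/* ENOUGH() is ceiling division; EZ() additionally maps an unset (zero)
 * timing field to zero clocks. Illustrative, assuming T is the bus
 * clock period in picoseconds as typical callers pass it (~30000 for a
 * 33 MHz clock): a 120 ns cycle quantizes to
 * ENOUGH(120 * 1000, 30000) == 4 clocks.
 */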
1904
1905static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1906{
1907 q->setup = EZ(t->setup * 1000, T);
1908 q->act8b = EZ(t->act8b * 1000, T);
1909 q->rec8b = EZ(t->rec8b * 1000, T);
1910 q->cyc8b = EZ(t->cyc8b * 1000, T);
1911 q->active = EZ(t->active * 1000, T);
1912 q->recover = EZ(t->recover * 1000, T);
1913 q->cycle = EZ(t->cycle * 1000, T);
1914 q->udma = EZ(t->udma * 1000, UT);
1915}
1916
1917void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1918 struct ata_timing *m, unsigned int what)
1919{
1920 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1921 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1922 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1923 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1924 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1925 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1926 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1927 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1928}
1929
1930static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1931{
1932 const struct ata_timing *t;
1933
1934 for (t = ata_timing; t->mode != speed; t++)
1935 if (t->mode == 0xFF)
1936 return NULL;
1937 return t;
1938}
1939
1940int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1941 struct ata_timing *t, int T, int UT)
1942{
1943 const struct ata_timing *s;
1944 struct ata_timing p;
1945
1946 /*
1947 * Find the mode.
1948 */
1949
1950 if (!(s = ata_timing_find_mode(speed)))
1951 return -EINVAL;
1952
1953 memcpy(t, s, sizeof(*s));
1954
1955 /*
1956 * If the drive is an EIDE drive, it can tell us it needs extended
1957 * PIO/MW_DMA cycle timing.
1958 */
1959
1960 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1961 memset(&p, 0, sizeof(p));
1962		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1963 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1964 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1965		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1966 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1967 }
1968 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1969 }
1970
1971 /*
1972 * Convert the timing to bus clock counts.
1973 */
1974
1975 ata_timing_quantize(t, t, T, UT);
1976
1977 /*
1978 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1979 * S.M.A.R.T. and some other commands. We have to ensure that the
1980 * DMA cycle timing is no faster than the fastest PIO timing.
1981 */
1982
1983 if (speed > XFER_PIO_4) {
1984 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1985 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1986 }
1987
1988 /*
1989 * Lengthen active & recovery time so that cycle time is correct.
1990 */
1991
1992 if (t->act8b + t->rec8b < t->cyc8b) {
1993 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1994 t->rec8b = t->cyc8b - t->act8b;
1995 }
1996
1997 if (t->active + t->recover < t->cycle) {
1998 t->active += (t->cycle - (t->active + t->recover)) / 2;
1999 t->recover = t->cycle - t->active;
2000 }
2001
2002 return 0;
2003}
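
/* Worked example (illustrative, T = 30000 ps, i.e. a 33 MHz clock,
 * ignoring any EIDE cycle-time override from the IDENTIFY data):
 * quantizing XFER_MW_DMA_2 (active 70 ns, recovery 25 ns, cycle 120 ns)
 * from the table above gives
 *
 *	active  = ENOUGH(70000, 30000)  == 3 clocks
 *	recover = ENOUGH(25000, 30000)  == 1 clock
 *	cycle   = ENOUGH(120000, 30000) == 4 clocks
 *
 * active + recover already equals cycle, so no lengthening is needed.
 */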
2004
2005/**
2006 * ata_down_xfermask_limit - adjust dev xfer masks downward
2007 * @dev: Device to adjust xfer masks
2008 * @force_pio0: Force PIO0
2009 *
2010 * Adjust xfer masks of @dev downward. Note that this function
2011 * does not apply the change. Invoking ata_set_mode() afterwards
2012 * will apply the limit.
2013 *
2014 * LOCKING:
2015 * Inherited from caller.
2016 *
2017 * RETURNS:
2018 * 0 on success, negative errno on failure
2019 */
2020int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2021{
2022 unsigned long xfer_mask;
2023 int highbit;
2024
2025 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2026 dev->udma_mask);
2027
2028 if (!xfer_mask)
2029 goto fail;
2030 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2031 if (xfer_mask & ATA_MASK_UDMA)
2032 xfer_mask &= ~ATA_MASK_MWDMA;
2033
2034 highbit = fls(xfer_mask) - 1;
2035 xfer_mask &= ~(1 << highbit);
2036 if (force_pio0)
2037 xfer_mask &= 1 << ATA_SHIFT_PIO;
2038 if (!xfer_mask)
2039 goto fail;
2040
2041 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2042 &dev->udma_mask);
2043
2044 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2045 ata_mode_string(xfer_mask));
2046
2047 return 0;
2048
2049 fail:
2050 return -EINVAL;
2051}
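
/* Worked example (illustrative): a device whose packed mask covers
 * PIO, MWDMA and UDMA0-5. The UDMA bits are set, so MWDMA is skipped
 * entirely; dropping the highest remaining bit then lowers the top
 * mode from UDMA5 to UDMA4:
 *
 *	xfer_mask &= ~ATA_MASK_MWDMA;	// never gear down UDMA -> MWDMA
 *	highbit = fls(xfer_mask) - 1;	// bit for UDMA5
 *	xfer_mask &= ~(1 << highbit);	// fastest mode is now UDMA4
 */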
2052
2053static int ata_dev_set_mode(struct ata_device *dev)
2054{
2055 unsigned int err_mask;
2056 int rc;
2057
2058 dev->flags &= ~ATA_DFLAG_PIO;
2059 if (dev->xfer_shift == ATA_SHIFT_PIO)
2060 dev->flags |= ATA_DFLAG_PIO;
2061
2062 err_mask = ata_dev_set_xfermode(dev);
2063 if (err_mask) {
2064 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2065 "(err_mask=0x%x)\n", err_mask);
2066 return -EIO;
2067 }
2068
2069 rc = ata_dev_revalidate(dev, 0);
2070 if (rc)
2071 return rc;
2072
2073 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2074 dev->xfer_shift, (int)dev->xfer_mode);
2075
2076 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2077 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2078 return 0;
2079}
2080
2081/**
2082 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2083 * @ap: port on which timings will be programmed
2084 * @r_failed_dev: out parameter for failed device
2085 *
2086 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2087 * ata_set_mode() fails, pointer to the failing device is
2088 * returned in @r_failed_dev.
2089 *
2090 * LOCKING:
2091 * PCI/etc. bus probe sem.
2092 *
2093 * RETURNS:
2094 * 0 on success, negative errno otherwise
2095 */
2096int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2097{
2098 struct ata_device *dev;
2099 int i, rc = 0, used_dma = 0, found = 0;
2100
2101 /* has private set_mode? */
2102 if (ap->ops->set_mode) {
2103 /* FIXME: make ->set_mode handle no device case and
2104 * return error code and failing device on failure.
2105 */
2106 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2107 if (ata_dev_enabled(&ap->device[i])) {
2108 ap->ops->set_mode(ap);
2109 break;
2110 }
2111 }
2112 return 0;
2113 }
2114
2115 /* step 1: calculate xfer_mask */
2116 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2117 unsigned int pio_mask, dma_mask;
2118
2119 dev = &ap->device[i];
2120
2121 if (!ata_dev_enabled(dev))
2122 continue;
2123
2124 ata_dev_xfermask(dev);
2125
2126 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2127 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2128 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2129 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2130
2131 found = 1;
2132 if (dev->dma_mode)
2133 used_dma = 1;
2134 }
2135 if (!found)
2136 goto out;
2137
2138 /* step 2: always set host PIO timings */
2139 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2140 dev = &ap->device[i];
2141 if (!ata_dev_enabled(dev))
2142 continue;
2143
2144 if (!dev->pio_mode) {
2145 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2146 rc = -EINVAL;
2147 goto out;
2148 }
2149
2150 dev->xfer_mode = dev->pio_mode;
2151 dev->xfer_shift = ATA_SHIFT_PIO;
2152 if (ap->ops->set_piomode)
2153 ap->ops->set_piomode(ap, dev);
2154 }
2155
2156 /* step 3: set host DMA timings */
2157 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2158 dev = &ap->device[i];
2159
2160 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2161 continue;
2162
2163 dev->xfer_mode = dev->dma_mode;
2164 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2165 if (ap->ops->set_dmamode)
2166 ap->ops->set_dmamode(ap, dev);
2167 }
2168
2169 /* step 4: update devices' xfer mode */
2170 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2171 dev = &ap->device[i];
2172
2173 if (!ata_dev_enabled(dev))
2174 continue;
2175
2176 rc = ata_dev_set_mode(dev);
2177 if (rc)
2178 goto out;
2179 }
2180
2181 /* Record simplex status. If we selected DMA then the other
2182 * host channels are not permitted to do so.
2183 */
2184 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2185 ap->host_set->simplex_claimed = 1;
2186
2187	/* step 5: chip-specific finalisation */
2188 if (ap->ops->post_set_mode)
2189 ap->ops->post_set_mode(ap);
2190
2191 out:
2192 if (rc)
2193 *r_failed_dev = dev;
2194 return rc;
2195}
2196
2197/**
2198 * ata_tf_to_host - issue ATA taskfile to host controller
2199 * @ap: port to which command is being issued
2200 * @tf: ATA taskfile register set
2201 *
2202 * Issues ATA taskfile register set to ATA host controller,
2203 * with proper synchronization with interrupt handler and
2204 * other threads.
2205 *
2206 * LOCKING:
2207 * spin_lock_irqsave(host_set lock)
2208 */
2209
2210static inline void ata_tf_to_host(struct ata_port *ap,
2211 const struct ata_taskfile *tf)
2212{
2213 ap->ops->tf_load(ap, tf);
2214 ap->ops->exec_command(ap, tf);
2215}
2216
2217/**
2218 * ata_busy_sleep - sleep until BSY clears, or timeout
2219 * @ap: port containing status register to be polled
2220 * @tmout_pat: impatience timeout
2221 * @tmout: overall timeout
2222 *
2223 * Sleep until ATA Status register bit BSY clears,
2224 * or a timeout occurs.
2225 *
2226 * LOCKING: None.
2227 */
2228
2229unsigned int ata_busy_sleep (struct ata_port *ap,
2230 unsigned long tmout_pat, unsigned long tmout)
2231{
2232 unsigned long timer_start, timeout;
2233 u8 status;
2234
2235 status = ata_busy_wait(ap, ATA_BUSY, 300);
2236 timer_start = jiffies;
2237 timeout = timer_start + tmout_pat;
2238 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2239 msleep(50);
2240 status = ata_busy_wait(ap, ATA_BUSY, 3);
2241 }
2242
2243 if (status & ATA_BUSY)
2244 ata_port_printk(ap, KERN_WARNING,
2245 "port is slow to respond, please be patient\n");
2246
2247 timeout = timer_start + tmout;
2248 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2249 msleep(50);
2250 status = ata_chk_status(ap);
2251 }
2252
2253 if (status & ATA_BUSY) {
2254 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2255 "(%lu secs)\n", tmout / HZ);
2256 return 1;
2257 }
2258
2259 return 0;
2260}
2261
2262static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2263{
2264 struct ata_ioports *ioaddr = &ap->ioaddr;
2265 unsigned int dev0 = devmask & (1 << 0);
2266 unsigned int dev1 = devmask & (1 << 1);
2267 unsigned long timeout;
2268
2269 /* if device 0 was found in ata_devchk, wait for its
2270 * BSY bit to clear
2271 */
2272 if (dev0)
2273 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2274
2275 /* if device 1 was found in ata_devchk, wait for
2276 * register access, then wait for BSY to clear
2277 */
2278 timeout = jiffies + ATA_TMOUT_BOOT;
2279 while (dev1) {
2280 u8 nsect, lbal;
2281
2282 ap->ops->dev_select(ap, 1);
2283 if (ap->flags & ATA_FLAG_MMIO) {
2284 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2285 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2286 } else {
2287 nsect = inb(ioaddr->nsect_addr);
2288 lbal = inb(ioaddr->lbal_addr);
2289 }
2290 if ((nsect == 1) && (lbal == 1))
2291 break;
2292 if (time_after(jiffies, timeout)) {
2293 dev1 = 0;
2294 break;
2295 }
2296 msleep(50); /* give drive a breather */
2297 }
2298 if (dev1)
2299 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2300
2301 /* is all this really necessary? */
2302 ap->ops->dev_select(ap, 0);
2303 if (dev1)
2304 ap->ops->dev_select(ap, 1);
2305 if (dev0)
2306 ap->ops->dev_select(ap, 0);
2307}
2308
2309static unsigned int ata_bus_softreset(struct ata_port *ap,
2310 unsigned int devmask)
2311{
2312 struct ata_ioports *ioaddr = &ap->ioaddr;
2313
2314 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2315
2316 /* software reset. causes dev0 to be selected */
2317 if (ap->flags & ATA_FLAG_MMIO) {
2318 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2319 udelay(20); /* FIXME: flush */
2320 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2321 udelay(20); /* FIXME: flush */
2322 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2323 } else {
2324 outb(ap->ctl, ioaddr->ctl_addr);
2325 udelay(10);
2326 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2327 udelay(10);
2328 outb(ap->ctl, ioaddr->ctl_addr);
2329 }
2330
2331 /* spec mandates ">= 2ms" before checking status.
2332 * We wait 150ms, because that was the magic delay used for
2333	 * ATAPI devices in Hale Landis's ATADRVR for the period of time
2334	 * between when the ATA command register is written and when
2335	 * status is first checked. Because waiting for "a while" before
2336 * checking status is fine, post SRST, we perform this magic
2337 * delay here as well.
2338 *
2339	 * The old drivers/ide code uses the 2 ms rule and then waits for ready.
2340 */
2341 msleep(150);
2342
2343 /* Before we perform post reset processing we want to see if
2344 * the bus shows 0xFF because the odd clown forgets the D7
2345 * pulldown resistor.
2346 */
2347 if (ata_check_status(ap) == 0xFF) {
2348 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2349 return AC_ERR_OTHER;
2350 }
2351
2352 ata_bus_post_reset(ap, devmask);
2353
2354 return 0;
2355}
2356
2357/**
2358 * ata_bus_reset - reset host port and associated ATA channel
2359 * @ap: port to reset
2360 *
2361 * This is typically the first time we actually start issuing
2362 * commands to the ATA channel. We wait for BSY to clear, then
2363 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2364 * result. Determine what devices, if any, are on the channel
2365 * by looking at the device 0/1 error register. Look at the signature
2366 * stored in each device's taskfile registers, to determine if
2367 * the device is ATA or ATAPI.
2368 *
2369 * LOCKING:
2370 * PCI/etc. bus probe sem.
2371 * Obtains host_set lock.
2372 *
2373 * SIDE EFFECTS:
2374 * Sets ATA_FLAG_DISABLED if bus reset fails.
2375 */
2376
2377void ata_bus_reset(struct ata_port *ap)
2378{
2379 struct ata_ioports *ioaddr = &ap->ioaddr;
2380 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2381 u8 err;
2382 unsigned int dev0, dev1 = 0, devmask = 0;
2383
2384 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2385
2386 /* determine if device 0/1 are present */
2387 if (ap->flags & ATA_FLAG_SATA_RESET)
2388 dev0 = 1;
2389 else {
2390 dev0 = ata_devchk(ap, 0);
2391 if (slave_possible)
2392 dev1 = ata_devchk(ap, 1);
2393 }
2394
2395 if (dev0)
2396 devmask |= (1 << 0);
2397 if (dev1)
2398 devmask |= (1 << 1);
2399
2400 /* select device 0 again */
2401 ap->ops->dev_select(ap, 0);
2402
2403 /* issue bus reset */
2404 if (ap->flags & ATA_FLAG_SRST)
2405 if (ata_bus_softreset(ap, devmask))
2406 goto err_out;
2407
2408 /*
2409 * determine by signature whether we have ATA or ATAPI devices
2410 */
2411 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2412 if ((slave_possible) && (err != 0x81))
2413 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2414
2415 /* re-enable interrupts */
2416 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2417 ata_irq_on(ap);
2418
2419 /* is double-select really necessary? */
2420 if (ap->device[1].class != ATA_DEV_NONE)
2421 ap->ops->dev_select(ap, 1);
2422 if (ap->device[0].class != ATA_DEV_NONE)
2423 ap->ops->dev_select(ap, 0);
2424
2425 /* if no devices were detected, disable this port */
2426 if ((ap->device[0].class == ATA_DEV_NONE) &&
2427 (ap->device[1].class == ATA_DEV_NONE))
2428 goto err_out;
2429
2430 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2431 /* set up device control for ATA_FLAG_SATA_RESET */
2432 if (ap->flags & ATA_FLAG_MMIO)
2433 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2434 else
2435 outb(ap->ctl, ioaddr->ctl_addr);
2436 }
2437
2438 DPRINTK("EXIT\n");
2439 return;
2440
2441err_out:
2442 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2443 ap->ops->port_disable(ap);
2444
2445 DPRINTK("EXIT\n");
2446}
2447
2448/**
2449 * sata_phy_debounce - debounce SATA phy status
2450 * @ap: ATA port to debounce SATA phy status for
2451 * @params: timing parameters { interval, duration, timeout } in msec
2452 *
2453 * Make sure SStatus of @ap reaches a stable state, defined as
2454 * holding the same value, with DET other than 1, for @duration,
2455 * polled every @interval, before @timeout. The timeout constrains
2456 * only the beginning of the stable state. Because DET gets stuck
2457 * at 1 on some controllers after hot unplugging, this function
2458 * waits until the timeout and then returns 0 if DET is stable at 1.
2459 *
2460 * LOCKING:
2461 * Kernel thread context (may sleep)
2462 *
2463 * RETURNS:
2464 * 0 on success, -errno on failure.
2465 */
2466int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2467{
2468 unsigned long interval_msec = params[0];
2469 unsigned long duration = params[1] * HZ / 1000;
2470 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2471 unsigned long last_jiffies;
2472 u32 last, cur;
2473 int rc;
2474
2475 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2476 return rc;
2477 cur &= 0xf;
2478
2479 last = cur;
2480 last_jiffies = jiffies;
2481
2482 while (1) {
2483 msleep(interval_msec);
2484 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2485 return rc;
2486 cur &= 0xf;
2487
2488 /* DET stable? */
2489 if (cur == last) {
2490 if (cur == 1 && time_before(jiffies, timeout))
2491 continue;
2492 if (time_after(jiffies, last_jiffies + duration))
2493 return 0;
2494 continue;
2495 }
2496
2497 /* unstable, start over */
2498 last = cur;
2499 last_jiffies = jiffies;
2500
2501 /* check timeout */
2502 if (time_after(jiffies, timeout))
2503 return -EBUSY;
2504 }
2505}
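
/* Illustrative timing parameters (values are an assumption, not taken
 * from this file): poll SStatus every 25 ms, require it to hold stable
 * for 500 ms, and give up 2 s after the first sample:
 *
 *	static const unsigned long deb_timing[] = { 25, 500, 2000 };
 *	rc = sata_phy_debounce(ap, deb_timing);
 */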
2506
2507/**
2508 * sata_phy_resume - resume SATA phy
2509 * @ap: ATA port to resume SATA phy for
2510 * @params: timing parameters { interval, duration, timeout } in msec
2511 *
2512 * Resume SATA phy of @ap and debounce it.
2513 *
2514 * LOCKING:
2515 * Kernel thread context (may sleep)
2516 *
2517 * RETURNS:
2518 * 0 on success, -errno on failure.
2519 */
2520int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2521{
2522 u32 scontrol;
2523 int rc;
2524
2525 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2526 return rc;
2527
2528 scontrol = (scontrol & 0x0f0) | 0x300;
2529
2530 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2531 return rc;
2532
2533 /* Some PHYs react badly if SStatus is pounded immediately
2534 * after resuming. Delay 200ms before debouncing.
2535 */
2536 msleep(200);
2537
2538 return sata_phy_debounce(ap, params);
2539}
2540
2541static void ata_wait_spinup(struct ata_port *ap)
2542{
2543 struct ata_eh_context *ehc = &ap->eh_context;
2544 unsigned long end, secs;
2545 int rc;
2546
2547 /* first, debounce phy if SATA */
2548 if (ap->cbl == ATA_CBL_SATA) {
2549 rc = sata_phy_debounce(ap, sata_deb_timing_eh);
2550
2551 /* if debounced successfully and offline, no need to wait */
2552 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2553 return;
2554 }
2555
2556 /* okay, let's give the drive time to spin up */
2557 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2558 secs = ((end - jiffies) + HZ - 1) / HZ;
2559
2560 if (time_after(jiffies, end))
2561 return;
2562
2563 if (secs > 5)
2564 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2565 "(%lu secs)\n", secs);
2566
2567 schedule_timeout_uninterruptible(end - jiffies);
2568}
2569
2570/**
2571 * ata_std_prereset - prepare for reset
2572 * @ap: ATA port to be reset
2573 *
2574 * @ap is about to be reset. Initialize it.
2575 *
2576 * LOCKING:
2577 * Kernel thread context (may sleep)
2578 *
2579 * RETURNS:
2580 * 0 on success, -errno otherwise.
2581 */
2582int ata_std_prereset(struct ata_port *ap)
2583{
2584 struct ata_eh_context *ehc = &ap->eh_context;
2585 const unsigned long *timing;
2586 int rc;
2587
2588 /* hotplug? */
2589 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
2590 if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
2591 ehc->i.action |= ATA_EH_HARDRESET;
2592 if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
2593 ata_wait_spinup(ap);
2594 }
2595
2596 /* if we're about to do hardreset, nothing more to do */
2597 if (ehc->i.action & ATA_EH_HARDRESET)
2598 return 0;
2599
2600 /* if SATA, resume phy */
2601 if (ap->cbl == ATA_CBL_SATA) {
2602 if (ap->flags & ATA_FLAG_LOADING)
2603 timing = sata_deb_timing_boot;
2604 else
2605 timing = sata_deb_timing_eh;
2606
2607 rc = sata_phy_resume(ap, timing);
2608 if (rc && rc != -EOPNOTSUPP) {
2609 /* phy resume failed */
2610 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2611 "link for reset (errno=%d)\n", rc);
2612 return rc;
2613 }
2614 }
2615
2616 /* Wait for !BSY if the controller can wait for the first D2H
2617 * Reg FIS and we don't know that no device is attached.
2618 */
2619 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2620 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2621
2622 return 0;
2623}
2624
2625/**
2626 * ata_std_softreset - reset host port via ATA SRST
2627 * @ap: port to reset
2628 * @classes: resulting classes of attached devices
2629 *
2630 * Reset host port using ATA SRST.
2631 *
2632 * LOCKING:
2633 * Kernel thread context (may sleep)
2634 *
2635 * RETURNS:
2636 * 0 on success, -errno otherwise.
2637 */
2638int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2639{
2640 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2641 unsigned int devmask = 0, err_mask;
2642 u8 err;
2643
2644 DPRINTK("ENTER\n");
2645
2646 if (ata_port_offline(ap)) {
2647 classes[0] = ATA_DEV_NONE;
2648 goto out;
2649 }
2650
2651 /* determine if device 0/1 are present */
2652 if (ata_devchk(ap, 0))
2653 devmask |= (1 << 0);
2654 if (slave_possible && ata_devchk(ap, 1))
2655 devmask |= (1 << 1);
2656
2657 /* select device 0 again */
2658 ap->ops->dev_select(ap, 0);
2659
2660 /* issue bus reset */
2661 DPRINTK("about to softreset, devmask=%x\n", devmask);
2662 err_mask = ata_bus_softreset(ap, devmask);
2663 if (err_mask) {
2664 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2665 err_mask);
2666 return -EIO;
2667 }
2668
2669 /* determine by signature whether we have ATA or ATAPI devices */
2670 classes[0] = ata_dev_try_classify(ap, 0, &err);
2671 if (slave_possible && err != 0x81)
2672 classes[1] = ata_dev_try_classify(ap, 1, &err);
2673
2674 out:
2675 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2676 return 0;
2677}
2678
2679/**
2680 * sata_std_hardreset - reset host port via SATA phy reset
2681 * @ap: port to reset
2682 * @class: resulting class of attached device
2683 *
2684 * SATA phy-reset host port using DET bits of SControl register.
2685 *
2686 * LOCKING:
2687 * Kernel thread context (may sleep)
2688 *
2689 * RETURNS:
2690 * 0 on success, -errno otherwise.
2691 */
2692int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2693{
2694 u32 scontrol;
2695 int rc;
2696
2697 DPRINTK("ENTER\n");
2698
2699 if (sata_set_spd_needed(ap)) {
2700 /* SATA spec says nothing about how to reconfigure
2701 * spd. To be on the safe side, turn off phy during
2702 * reconfiguration. This works for at least ICH7 AHCI
2703 * and Sil3124.
2704 */
2705 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2706 return rc;
2707
2708 scontrol = (scontrol & 0x0f0) | 0x302;
2709
2710 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2711 return rc;
2712
2713 sata_set_spd(ap);
2714 }
2715
2716 /* issue phy wake/reset */
2717 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2718 return rc;
2719
2720 scontrol = (scontrol & 0x0f0) | 0x301;
2721
2722 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2723 return rc;
2724
2725 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2726 * 10.4.2 says at least 1 ms.
2727 */
2728 msleep(1);
2729
2730 /* bring phy back */
2731 sata_phy_resume(ap, sata_deb_timing_eh);
2732
2733 /* TODO: phy layer with polling, timeouts, etc. */
2734 if (ata_port_offline(ap)) {
2735 *class = ATA_DEV_NONE;
2736 DPRINTK("EXIT, link offline\n");
2737 return 0;
2738 }
2739
2740 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2741 ata_port_printk(ap, KERN_ERR,
2742 "COMRESET failed (device not ready)\n");
2743 return -EIO;
2744 }
2745
2746 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2747
2748 *class = ata_dev_try_classify(ap, 0, NULL);
2749
2750 DPRINTK("EXIT, class=%u\n", *class);
2751 return 0;
2752}
2753
2754/**
2755 * ata_std_postreset - standard postreset callback
2756 * @ap: the target ata_port
2757 * @classes: classes of attached devices
2758 *
2759 * This function is invoked after a successful reset. Note that
2760 * the device might have been reset more than once using
2761 * different reset methods before postreset is invoked.
2762 *
2763 * LOCKING:
2764 * Kernel thread context (may sleep)
2765 */
2766void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2767{
2768 u32 serror;
2769
2770 DPRINTK("ENTER\n");
2771
2772 /* print link status */
2773 sata_print_link_status(ap);
2774
2775 /* clear SError */
2776 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2777 sata_scr_write(ap, SCR_ERROR, serror);
2778
2779 /* re-enable interrupts */
2780 if (!ap->ops->error_handler) {
2781 /* FIXME: hack. create a hook instead */
2782 if (ap->ioaddr.ctl_addr)
2783 ata_irq_on(ap);
2784 }
2785
2786 /* is double-select really necessary? */
2787 if (classes[0] != ATA_DEV_NONE)
2788 ap->ops->dev_select(ap, 1);
2789 if (classes[1] != ATA_DEV_NONE)
2790 ap->ops->dev_select(ap, 0);
2791
2792 /* bail out if no device is present */
2793 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2794 DPRINTK("EXIT, no device\n");
2795 return;
2796 }
2797
2798 /* set up device control */
2799 if (ap->ioaddr.ctl_addr) {
2800 if (ap->flags & ATA_FLAG_MMIO)
2801 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2802 else
2803 outb(ap->ctl, ap->ioaddr.ctl_addr);
2804 }
2805
2806 DPRINTK("EXIT\n");
2807}
2808
2809/**
2810 * ata_dev_same_device - Determine whether new ID matches configured device
2811 * @dev: device to compare against
2812 * @new_class: class of the new device
2813 * @new_id: IDENTIFY page of the new device
2814 *
2815 * Compare @new_class and @new_id against @dev and determine
2816 * whether @dev is the device indicated by @new_class and
2817 * @new_id.
2818 *
2819 * LOCKING:
2820 * None.
2821 *
2822 * RETURNS:
2823 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2824 */
2825static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2826 const u16 *new_id)
2827{
2828 const u16 *old_id = dev->id;
2829 unsigned char model[2][41], serial[2][21];
2830 u64 new_n_sectors;
2831
2832 if (dev->class != new_class) {
2833 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2834 dev->class, new_class);
2835 return 0;
2836 }
2837
2838 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2839 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2840 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2841 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2842 new_n_sectors = ata_id_n_sectors(new_id);
2843
2844 if (strcmp(model[0], model[1])) {
2845 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2846 "'%s' != '%s'\n", model[0], model[1]);
2847 return 0;
2848 }
2849
2850 if (strcmp(serial[0], serial[1])) {
2851 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2852 "'%s' != '%s'\n", serial[0], serial[1]);
2853 return 0;
2854 }
2855
2856 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2857 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2858 "%llu != %llu\n",
2859 (unsigned long long)dev->n_sectors,
2860 (unsigned long long)new_n_sectors);
2861 return 0;
2862 }
2863
2864 return 1;
2865}
2866
2867/**
2868 * ata_dev_revalidate - Revalidate ATA device
2869 * @dev: device to revalidate
2870 * @post_reset: is this revalidation after reset?
2871 *
2872 * Re-read IDENTIFY page and make sure @dev is still attached to
2873 * the port.
2874 *
2875 * LOCKING:
2876 * Kernel thread context (may sleep)
2877 *
2878 * RETURNS:
2879 * 0 on success, negative errno otherwise
2880 */
2881int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2882{
2883 unsigned int class = dev->class;
2884 u16 *id = (void *)dev->ap->sector_buf;
2885 int rc;
2886
2887 if (!ata_dev_enabled(dev)) {
2888 rc = -ENODEV;
2889 goto fail;
2890 }
2891
2892 /* read ID data */
2893 rc = ata_dev_read_id(dev, &class, post_reset, id);
2894 if (rc)
2895 goto fail;
2896
2897 /* is the device still there? */
2898 if (!ata_dev_same_device(dev, class, id)) {
2899 rc = -ENODEV;
2900 goto fail;
2901 }
2902
2903 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2904
2905 /* configure device according to the new ID */
2906 rc = ata_dev_configure(dev, 0);
2907 if (rc == 0)
2908 return 0;
2909
2910 fail:
2911 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2912 return rc;
2913}
2914
2915 static const char * const ata_dma_blacklist[] = {
2916 "WDC AC11000H", NULL,
2917 "WDC AC22100H", NULL,
2918 "WDC AC32500H", NULL,
2919 "WDC AC33100H", NULL,
2920 "WDC AC31600H", NULL,
2921 "WDC AC32100H", "24.09P07",
2922 "WDC AC23200L", "21.10N21",
2923 "Compaq CRD-8241B", NULL,
2924 "CRD-8400B", NULL,
2925 "CRD-8480B", NULL,
2926 "CRD-8482B", NULL,
2927 "CRD-84", NULL,
2928 "SanDisk SDP3B", NULL,
2929 "SanDisk SDP3B-64", NULL,
2930 "SANYO CD-ROM CRD", NULL,
2931 "HITACHI CDR-8", NULL,
2932 "HITACHI CDR-8335", NULL,
2933 "HITACHI CDR-8435", NULL,
2934 "Toshiba CD-ROM XM-6202B", NULL,
2935 "TOSHIBA CD-ROM XM-1702BC", NULL,
2936 "CD-532E-A", NULL,
2937 "E-IDE CD-ROM CR-840", NULL,
2938 "CD-ROM Drive/F5A", NULL,
2939 "WPI CDD-820", NULL,
2940 "SAMSUNG CD-ROM SC-148C", NULL,
2941 "SAMSUNG CD-ROM SC", NULL,
2942 "SanDisk SDP3B-64", NULL,
2943 "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL,
2944 "_NEC DV5800A", NULL,
2945 "SAMSUNG CD-ROM SN-124", "N001"
2946};
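
/* The entries above come in { model, firmware revision } pairs: a NULL
 * revision blacklists every revision of that model, while e.g.
 * "WDC AC32100H" is blacklisted only when its firmware reports
 * "24.09P07".
 */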
2947
2948static int ata_strim(char *s, size_t len)
2949{
2950 len = strnlen(s, len);
2951
2952 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2953 while ((len > 0) && (s[len - 1] == ' ')) {
2954 len--;
2955 s[len] = 0;
2956 }
2957 return len;
2958}
2959
2960static int ata_dma_blacklisted(const struct ata_device *dev)
2961{
2962 unsigned char model_num[40];
2963 unsigned char model_rev[16];
2964 unsigned int nlen, rlen;
2965 int i;
2966
2967	/* We don't support polling DMA: blacklist DMA (and fall back to
2968	 * PIO) for ATAPI devices with CDB-intr if the LLDD handles
2969	 * interrupts only in the HSM_ST_LAST state.
2970 */
2971 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
2972 (dev->flags & ATA_DFLAG_CDB_INTR))
2973 return 1;
2974
2975 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2976 sizeof(model_num));
2977 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2978 sizeof(model_rev));
2979 nlen = ata_strim(model_num, sizeof(model_num));
2980 rlen = ata_strim(model_rev, sizeof(model_rev));
2981
2982 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2983 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2984 if (ata_dma_blacklist[i+1] == NULL)
2985 return 1;
2986			if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2987 return 1;
2988 }
2989 }
2990 return 0;
2991}
2992
2993/**
2994 * ata_dev_xfermask - Compute supported xfermask of the given device
2995 * @dev: Device to compute xfermask for
2996 *
2997 * Compute supported xfermask of @dev and store it in
2998 * dev->*_mask. This function is responsible for applying all
2999 * known limits including host controller limits, device
3000 * blacklist, etc...
3001 *
3002 * FIXME: The current implementation limits all transfer modes to
3003 * the fastest mode of the slowest device on the port. This is not
3004 * required on most controllers.
3005 *
3006 * LOCKING:
3007 * None.
3008 */
3009static void ata_dev_xfermask(struct ata_device *dev)
3010{
3011 struct ata_port *ap = dev->ap;
3012 struct ata_host_set *hs = ap->host_set;
3013 unsigned long xfer_mask;
3014 int i;
3015
3016 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3017 ap->mwdma_mask, ap->udma_mask);
3018
3019 /* Apply cable rule here. Don't apply it early because when
3020 * we handle hot plug the cable type can itself change.
3021 */
3022 if (ap->cbl == ATA_CBL_PATA40)
3023 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3024
3025 /* FIXME: Use port-wide xfermask for now */
3026 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3027 struct ata_device *d = &ap->device[i];
3028
3029 if (ata_dev_absent(d))
3030 continue;
3031
3032 if (ata_dev_disabled(d)) {
3033 /* to avoid violating device selection timing */
3034 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3035 UINT_MAX, UINT_MAX);
3036 continue;
3037 }
3038
3039 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3040 d->mwdma_mask, d->udma_mask);
3041 xfer_mask &= ata_id_xfermask(d->id);
3042 if (ata_dma_blacklisted(d))
3043 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3044 }
3045
3046 if (ata_dma_blacklisted(dev))
3047 ata_dev_printk(dev, KERN_WARNING,
3048 "device is on DMA blacklist, disabling DMA\n");
3049
3050 if (hs->flags & ATA_HOST_SIMPLEX) {
3051 if (hs->simplex_claimed)
3052 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3053 }
3054
3055 if (ap->ops->mode_filter)
3056 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3057
3058 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3059 &dev->mwdma_mask, &dev->udma_mask);
3060}
3061
3062/**
3063 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3064 * @dev: Device to which command will be sent
3065 *
3066 * Issue SET FEATURES - XFER MODE command to device @dev
3067 * on port @ap.
3068 *
3069 * LOCKING:
3070 * PCI/etc. bus probe sem.
3071 *
3072 * RETURNS:
3073 * 0 on success, AC_ERR_* mask otherwise.
3074 */
3075
3076static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3077{
3078 struct ata_taskfile tf;
3079 unsigned int err_mask;
3080
3081 /* set up set-features taskfile */
3082 DPRINTK("set features - xfer mode\n");
3083
3084 ata_tf_init(dev, &tf);
3085 tf.command = ATA_CMD_SET_FEATURES;
3086 tf.feature = SETFEATURES_XFER;
3087 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3088 tf.protocol = ATA_PROT_NODATA;
3089 tf.nsect = dev->xfer_mode;
3090
3091 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3092
3093 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3094 return err_mask;
3095}
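
/* For reference (illustrative): the mode byte placed in tf.nsect is
 * the standard XFER_* opcode, e.g. 0x0c for XFER_PIO_4 or 0x45 for
 * XFER_UDMA_5, so a device being switched to UDMA5 sees
 *
 *	SET FEATURES, feature 0x03 (SETFEATURES_XFER), count 0x45
 */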
3096
3097/**
3098 * ata_dev_init_params - Issue INIT DEV PARAMS command
3099 * @dev: Device to which command will be sent
3100 * @heads: Number of heads (taskfile parameter)
3101 * @sectors: Number of sectors (taskfile parameter)
3102 *
3103 * LOCKING:
3104 * Kernel thread context (may sleep)
3105 *
3106 * RETURNS:
3107 * 0 on success, AC_ERR_* mask otherwise.
3108 */
3109static unsigned int ata_dev_init_params(struct ata_device *dev,
3110 u16 heads, u16 sectors)
3111{
3112 struct ata_taskfile tf;
3113 unsigned int err_mask;
3114
3115 /* Number of sectors per track 1-255. Number of heads 1-16 */
3116 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3117 return AC_ERR_INVALID;
3118
3119 /* set up init dev params taskfile */
3120	DPRINTK("init dev params\n");
3121
3122 ata_tf_init(dev, &tf);
3123 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3124 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3125 tf.protocol = ATA_PROT_NODATA;
3126 tf.nsect = sectors;
3127 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3128
3129 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3130
3131 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3132 return err_mask;
3133}
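
/* Illustrative: for a legacy CHS geometry of 16 heads and 63 sectors
 * per track, the taskfile carries
 *
 *	tf.nsect   = 63;
 *	tf.device |= (16 - 1) & 0x0f;	// max head number = 15
 */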
3134
3135/**
3136 * ata_sg_clean - Unmap DMA memory associated with command
3137 * @qc: Command containing DMA memory to be released
3138 *
3139 * Unmap all mapped DMA memory associated with this command.
3140 *
3141 * LOCKING:
3142 * spin_lock_irqsave(host_set lock)
3143 */
3144
3145static void ata_sg_clean(struct ata_queued_cmd *qc)
3146{
3147 struct ata_port *ap = qc->ap;
3148 struct scatterlist *sg = qc->__sg;
3149 int dir = qc->dma_dir;
3150 void *pad_buf = NULL;
3151
3152 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3153 WARN_ON(sg == NULL);
3154
3155 if (qc->flags & ATA_QCFLAG_SINGLE)
3156 WARN_ON(qc->n_elem > 1);
3157
3158 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3159
3160 /* if we padded the buffer out to 32-bit bound, and data
3161 * xfer direction is from-device, we must copy from the
3162 * pad buffer back into the supplied buffer
3163 */
3164 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3165 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3166
3167 if (qc->flags & ATA_QCFLAG_SG) {
3168 if (qc->n_elem)
3169 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3170 /* restore last sg */
3171 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3172 if (pad_buf) {
3173 struct scatterlist *psg = &qc->pad_sgent;
3174 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3175 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3176 kunmap_atomic(addr, KM_IRQ0);
3177 }
3178 } else {
3179 if (qc->n_elem)
3180 dma_unmap_single(ap->dev,
3181 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3182 dir);
3183 /* restore sg */
3184 sg->length += qc->pad_len;
3185 if (pad_buf)
3186 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3187 pad_buf, qc->pad_len);
3188 }
3189
3190 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3191 qc->__sg = NULL;
3192}
3193
3194/**
3195 * ata_fill_sg - Fill PCI IDE PRD table
3196 * @qc: Metadata associated with taskfile to be transferred
3197 *
3198 * Fill PCI IDE PRD (scatter-gather) table with segments
3199 * associated with the current disk command.
3200 *
3201 * LOCKING:
3202 * spin_lock_irqsave(host_set lock)
3203 *
3204 */
3205static void ata_fill_sg(struct ata_queued_cmd *qc)
3206{
3207 struct ata_port *ap = qc->ap;
3208 struct scatterlist *sg;
3209 unsigned int idx;
3210
3211 WARN_ON(qc->__sg == NULL);
3212 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3213
3214 idx = 0;
3215 ata_for_each_sg(sg, qc) {
3216 u32 addr, offset;
3217 u32 sg_len, len;
3218
3219 /* determine if physical DMA addr spans 64K boundary.
3220 * Note h/w doesn't support 64-bit, so we unconditionally
3221 * truncate dma_addr_t to u32.
3222 */
3223 addr = (u32) sg_dma_address(sg);
3224 sg_len = sg_dma_len(sg);
3225
3226 while (sg_len) {
3227 offset = addr & 0xffff;
3228 len = sg_len;
3229 if ((offset + sg_len) > 0x10000)
3230 len = 0x10000 - offset;
3231
3232 ap->prd[idx].addr = cpu_to_le32(addr);
3233 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3234 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3235
3236 idx++;
3237 sg_len -= len;
3238 addr += len;
3239 }
3240 }
3241
3242 if (idx)
3243 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3244}
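
/* Worked example (illustrative): one S/G element of 8 KiB at DMA
 * address 0x01FFFC00 crosses a 64 KiB boundary, so it is split into
 * two PRD entries:
 *
 *	PRD[0]: addr 0x01FFFC00, len 0x0400	(up to the 64 KiB line)
 *	PRD[1]: addr 0x02000000, len 0x1C00	(the remaining 7 KiB)
 *
 * and the last entry written gets ATA_PRD_EOT set.
 */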
3245/**
3246 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3247 * @qc: Metadata associated with taskfile to check
3248 *
3249 * Allow low-level driver to filter ATA PACKET commands, returning
3250 * a status indicating whether or not it is OK to use DMA for the
3251 * supplied PACKET command.
3252 *
3253 * LOCKING:
3254 * spin_lock_irqsave(host_set lock)
3255 *
3256 * RETURNS: 0 when ATAPI DMA can be used
3257 * nonzero otherwise
3258 */
3259int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3260{
3261 struct ata_port *ap = qc->ap;
3262 int rc = 0; /* Assume ATAPI DMA is OK by default */
3263
3264 if (ap->ops->check_atapi_dma)
3265 rc = ap->ops->check_atapi_dma(qc);
3266
3267 return rc;
3268}
3269/**
3270 * ata_qc_prep - Prepare taskfile for submission
3271 * @qc: Metadata associated with taskfile to be prepared
3272 *
3273 * Prepare ATA taskfile for submission.
3274 *
3275 * LOCKING:
3276 * spin_lock_irqsave(host_set lock)
3277 */
3278void ata_qc_prep(struct ata_queued_cmd *qc)
3279{
3280 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3281 return;
3282
3283 ata_fill_sg(qc);
3284}
3285
3286void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3287
3288/**
3289 * ata_sg_init_one - Associate command with memory buffer
3290 * @qc: Command to be associated
3291 * @buf: Memory buffer
3292 * @buflen: Length of memory buffer, in bytes.
3293 *
3294 * Initialize the data-related elements of queued_cmd @qc
3295 * to point to a single memory buffer, @buf of byte length @buflen.
3296 *
3297 * LOCKING:
3298 * spin_lock_irqsave(host_set lock)
3299 */
3300
3301void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3302{
3303 struct scatterlist *sg;
3304
3305 qc->flags |= ATA_QCFLAG_SINGLE;
3306
3307 memset(&qc->sgent, 0, sizeof(qc->sgent));
3308 qc->__sg = &qc->sgent;
3309 qc->n_elem = 1;
3310 qc->orig_n_elem = 1;
3311 qc->buf_virt = buf;
3312 qc->nbytes = buflen;
3313
3314 sg = qc->__sg;
3315 sg_init_one(sg, buf, buflen);
3316}
3317
3318/**
3319 * ata_sg_init - Associate command with scatter-gather table.
3320 * @qc: Command to be associated
3321 * @sg: Scatter-gather table.
3322 * @n_elem: Number of elements in s/g table.
3323 *
3324 * Initialize the data-related elements of queued_cmd @qc
3325 * to point to a scatter-gather table @sg, containing @n_elem
3326 * elements.
3327 *
3328 * LOCKING:
3329 * spin_lock_irqsave(host_set lock)
3330 */
3331
3332void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3333 unsigned int n_elem)
3334{
3335 qc->flags |= ATA_QCFLAG_SG;
3336 qc->__sg = sg;
3337 qc->n_elem = n_elem;
3338 qc->orig_n_elem = n_elem;
3339}
3340
3341/**
3342 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3343 * @qc: Command with memory buffer to be mapped.
3344 *
3345 * DMA-map the memory buffer associated with queued_cmd @qc.
3346 *
3347 * LOCKING:
3348 * spin_lock_irqsave(host_set lock)
3349 *
3350 * RETURNS:
3351 * Zero on success, negative on error.
3352 */
3353
3354static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3355{
3356 struct ata_port *ap = qc->ap;
3357 int dir = qc->dma_dir;
3358 struct scatterlist *sg = qc->__sg;
3359 dma_addr_t dma_address;
3360 int trim_sg = 0;
3361
3362 /* we must lengthen transfers to end on a 32-bit boundary */
3363 qc->pad_len = sg->length & 3;
3364 if (qc->pad_len) {
3365 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3366 struct scatterlist *psg = &qc->pad_sgent;
3367
3368 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3369
3370 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3371
3372 if (qc->tf.flags & ATA_TFLAG_WRITE)
3373 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3374 qc->pad_len);
3375
3376 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3377 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3378 /* trim sg */
3379 sg->length -= qc->pad_len;
3380 if (sg->length == 0)
3381 trim_sg = 1;
3382
3383 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3384 sg->length, qc->pad_len);
3385 }
3386
3387 if (trim_sg) {
3388 qc->n_elem--;
3389 goto skip_map;
3390 }
3391
3392 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3393 sg->length, dir);
3394 if (dma_mapping_error(dma_address)) {
3395 /* restore sg */
3396 sg->length += qc->pad_len;
3397 return -1;
3398 }
3399
3400 sg_dma_address(sg) = dma_address;
3401 sg_dma_len(sg) = sg->length;
3402
3403skip_map:
3404 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3405 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3406
3407 return 0;
3408}
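
/* Worked example (illustrative): an ATAPI command with a 510-byte
 * buffer. 510 & 3 == 2, so the last two bytes are diverted into the
 * zero-filled pad buffer (copied in for writes, copied back by
 * ata_sg_clean() for reads), the main sg entry is trimmed to 508
 * bytes, and the controller sees two 32-bit-aligned segments of 508
 * and ATA_DMA_PAD_SZ (4) bytes.
 */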
3409
3410/**
3411 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3412 * @qc: Command with scatter-gather table to be mapped.
3413 *
3414 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3415 *
3416 * LOCKING:
3417 * spin_lock_irqsave(host_set lock)
3418 *
3419 * RETURNS:
3420 * Zero on success, negative on error.
3421 *
3422 */
3423
3424static int ata_sg_setup(struct ata_queued_cmd *qc)
3425{
3426 struct ata_port *ap = qc->ap;
3427 struct scatterlist *sg = qc->__sg;
3428 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3429 int n_elem, pre_n_elem, dir, trim_sg = 0;
3430
3431 VPRINTK("ENTER, ata%u\n", ap->id);
3432 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3433
3434 /* we must lengthen transfers to end on a 32-bit boundary */
3435 qc->pad_len = lsg->length & 3;
3436 if (qc->pad_len) {
3437 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3438 struct scatterlist *psg = &qc->pad_sgent;
3439 unsigned int offset;
3440
3441 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3442
3443 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3444
3445 /*
3446 * psg->page/offset are used to copy to-be-written
3447 * data in this function or read data in ata_sg_clean.
3448 */
3449 offset = lsg->offset + lsg->length - qc->pad_len;
3450 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3451 psg->offset = offset_in_page(offset);
3452
3453 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3454 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3455 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3456 kunmap_atomic(addr, KM_IRQ0);
3457 }
3458
3459 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3460 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3461 /* trim last sg */
3462 lsg->length -= qc->pad_len;
3463 if (lsg->length == 0)
3464 trim_sg = 1;
3465
3466 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3467 qc->n_elem - 1, lsg->length, qc->pad_len);
3468 }
3469
3470 pre_n_elem = qc->n_elem;
3471 if (trim_sg && pre_n_elem)
3472 pre_n_elem--;
3473
3474 if (!pre_n_elem) {
3475 n_elem = 0;
3476 goto skip_map;
3477 }
3478
3479 dir = qc->dma_dir;
3480 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3481 if (n_elem < 1) {
3482 /* restore last sg */
3483 lsg->length += qc->pad_len;
3484 return -1;
3485 }
3486
3487 DPRINTK("%d sg elements mapped\n", n_elem);
3488
3489skip_map:
3490 qc->n_elem = n_elem;
3491
3492 return 0;
3493}
3494
3495/**
3496 * swap_buf_le16 - swap halves of 16-bit words in place
3497 * @buf: Buffer to swap
3498 * @buf_words: Number of 16-bit words in buffer.
3499 *
3500 * Swap halves of 16-bit words if needed to convert from
3501 * little-endian byte order to native cpu byte order, or
3502 * vice-versa.
3503 *
3504 * LOCKING:
3505 * Inherited from caller.
3506 */
3507void swap_buf_le16(u16 *buf, unsigned int buf_words)
3508{
3509#ifdef __BIG_ENDIAN
3510 unsigned int i;
3511
3512 for (i = 0; i < buf_words; i++)
3513 buf[i] = le16_to_cpu(buf[i]);
3514#endif /* __BIG_ENDIAN */
3515}
3516
3517/**
3518 * ata_mmio_data_xfer - Transfer data by MMIO
3519 * @adev: device for this I/O
3520 * @buf: data buffer
3521 * @buflen: buffer length
3522 * @write_data: read/write
3523 *
3524 * Transfer data from/to the device data register by MMIO.
3525 *
3526 * LOCKING:
3527 * Inherited from caller.
3528 */
3529
3530void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3531 unsigned int buflen, int write_data)
3532{
3533 struct ata_port *ap = adev->ap;
3534 unsigned int i;
3535 unsigned int words = buflen >> 1;
3536 u16 *buf16 = (u16 *) buf;
3537 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3538
3539 /* Transfer multiple of 2 bytes */
3540 if (write_data) {
3541 for (i = 0; i < words; i++)
3542 writew(le16_to_cpu(buf16[i]), mmio);
3543 } else {
3544 for (i = 0; i < words; i++)
3545 buf16[i] = cpu_to_le16(readw(mmio));
3546 }
3547
3548 /* Transfer trailing 1 byte, if any. */
3549 if (unlikely(buflen & 0x01)) {
3550 u16 align_buf[1] = { 0 };
3551 unsigned char *trailing_buf = buf + buflen - 1;
3552
3553 if (write_data) {
3554 memcpy(align_buf, trailing_buf, 1);
3555 writew(le16_to_cpu(align_buf[0]), mmio);
3556 } else {
3557 align_buf[0] = cpu_to_le16(readw(mmio));
3558 memcpy(trailing_buf, align_buf, 1);
3559 }
3560 }
3561}
3562
3563/**
3564 * ata_pio_data_xfer - Transfer data by PIO
3565 * @adev: device to target
3566 * @buf: data buffer
3567 * @buflen: buffer length
3568 * @write_data: 1 to write to the device, 0 to read from it
3569 *
3570 * Transfer data from/to the device data register by PIO.
3571 *
3572 * LOCKING:
3573 * Inherited from caller.
3574 */
3575
3576void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3577 unsigned int buflen, int write_data)
3578{
3579 struct ata_port *ap = adev->ap;
3580 unsigned int words = buflen >> 1;
3581
3582 /* Transfer multiple of 2 bytes */
3583 if (write_data)
3584 outsw(ap->ioaddr.data_addr, buf, words);
3585 else
3586 insw(ap->ioaddr.data_addr, buf, words);
3587
3588 /* Transfer trailing 1 byte, if any. */
3589 if (unlikely(buflen & 0x01)) {
3590 u16 align_buf[1] = { 0 };
3591 unsigned char *trailing_buf = buf + buflen - 1;
3592
3593 if (write_data) {
3594 memcpy(align_buf, trailing_buf, 1);
3595 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3596 } else {
3597 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3598 memcpy(trailing_buf, align_buf, 1);
3599 }
3600 }
3601}
3602
3603/**
3604 * ata_pio_data_xfer_noirq - Transfer data by PIO
3605 * @adev: device to target
3606 * @buf: data buffer
3607 * @buflen: buffer length
3608 * @write_data: 1 to write to the device, 0 to read from it
3609 *
3610 * Transfer data from/to the device data register by PIO. Do the
3611 * transfer with interrupts disabled.
3612 *
3613 * LOCKING:
3614 * Inherited from caller.
3615 */
3616
3617void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3618 unsigned int buflen, int write_data)
3619{
3620 unsigned long flags;
3621 local_irq_save(flags);
3622 ata_pio_data_xfer(adev, buf, buflen, write_data);
3623 local_irq_restore(flags);
3624}
3625
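/* Illustrative sketch: a low-level driver picks one of the transfer
 * helpers above via the ->data_xfer hook of its ata_port_operations.
 * "example_port_ops" is hypothetical and shows only this one entry;
 * a real driver initializes the full operations table.
 */
static const struct ata_port_operations example_port_ops = {
	/* MMIO-based controllers use ata_mmio_data_xfer; port-I/O
	 * controllers would use ata_pio_data_xfer or the _noirq
	 * variant instead. */
	.data_xfer	= ata_mmio_data_xfer,
};
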
3626
3627/**
3628 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3629 * @qc: Command in progress
3630 *
3631 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3632 *
3633 * LOCKING:
3634 * Inherited from caller.
3635 */
3636
3637static void ata_pio_sector(struct ata_queued_cmd *qc)
3638{
3639 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3640 struct scatterlist *sg = qc->__sg;
3641 struct ata_port *ap = qc->ap;
3642 struct page *page;
3643 unsigned int offset;
3644 unsigned char *buf;
3645
3646 if (qc->cursect == (qc->nsect - 1))
3647 ap->hsm_task_state = HSM_ST_LAST;
3648
3649 page = sg[qc->cursg].page;
3650 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3651
3652 /* get the current page and offset */
3653 page = nth_page(page, (offset >> PAGE_SHIFT));
3654 offset %= PAGE_SIZE;
3655
3656 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3657
3658 if (PageHighMem(page)) {
3659 unsigned long flags;
3660
3661 /* FIXME: use a bounce buffer */
3662 local_irq_save(flags);
3663 buf = kmap_atomic(page, KM_IRQ0);
3664
3665 /* do the actual data transfer */
3666 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3667
3668 kunmap_atomic(buf, KM_IRQ0);
3669 local_irq_restore(flags);
3670 } else {
3671 buf = page_address(page);
3672 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3673 }
3674
3675 qc->cursect++;
3676 qc->cursg_ofs++;
3677
3678 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3679 qc->cursg++;
3680 qc->cursg_ofs = 0;
3681 }
3682}
3683
3684/**
3685 * ata_pio_sectors - Transfer one or more 512-byte sectors.
3686 * @qc: Command in progress
3687 *
3688 * Transfer one or more ATA_SECT_SIZE chunks of data from/to the
3689 * ATA device to satisfy the DRQ request.
3690 *
3691 * LOCKING:
3692 * Inherited from caller.
3693 */
3694
3695static void ata_pio_sectors(struct ata_queued_cmd *qc)
3696{
3697 if (is_multi_taskfile(&qc->tf)) {
3698 /* READ/WRITE MULTIPLE */
3699 unsigned int nsect;
3700
3701 WARN_ON(qc->dev->multi_count == 0);
3702
3703 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3704 while (nsect--)
3705 ata_pio_sector(qc);
3706 } else
3707 ata_pio_sector(qc);
3708}
3709
3710/**
3711 * atapi_send_cdb - Write CDB bytes to hardware
3712 * @ap: Port to which ATAPI device is attached.
3713 * @qc: Taskfile currently active
3714 *
3715 * Called when the device has indicated its readiness to
3716 * accept a CDB. Sends the CDB to the device.
3717 *
3718 * LOCKING:
3719 * Inherited from caller.
3720 */
3721
3722static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3723{
3724 /* send SCSI cdb */
3725 DPRINTK("send cdb\n");
3726 WARN_ON(qc->dev->cdb_len < 12);
3727
3728 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3729 ata_altstatus(ap); /* flush */
3730
3731 switch (qc->tf.protocol) {
3732 case ATA_PROT_ATAPI:
3733 ap->hsm_task_state = HSM_ST;
3734 break;
3735 case ATA_PROT_ATAPI_NODATA:
3736 ap->hsm_task_state = HSM_ST_LAST;
3737 break;
3738 case ATA_PROT_ATAPI_DMA:
3739 ap->hsm_task_state = HSM_ST_LAST;
3740 /* initiate bmdma */
3741 ap->ops->bmdma_start(qc);
3742 break;
3743 }
3744}
3745
3746/**
3747 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3748 * @qc: Command in progress
3749 * @bytes: number of bytes
3750 *
3751 * Transfer data from/to the ATAPI device.
3752 *
3753 * LOCKING:
3754 * Inherited from caller.
3755 *
3756 */
3757
3758static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3759{
3760 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3761 struct scatterlist *sg = qc->__sg;
3762 struct ata_port *ap = qc->ap;
3763 struct page *page;
3764 unsigned char *buf;
3765 unsigned int offset, count;
3766
3767 if (qc->curbytes + bytes >= qc->nbytes)
3768 ap->hsm_task_state = HSM_ST_LAST;
3769
3770next_sg:
3771 if (unlikely(qc->cursg >= qc->n_elem)) {
3772 /*
3773 * The end of qc->sg is reached and the device expects
3774 * more data to transfer. In order not to overrun qc->sg
3775 * and to satisfy the length in the byte count register:
3776 * - for the read case, discard trailing data from the device
3777 * - for the write case, pad the device with zero data
3778 */
3779 u16 pad_buf[1] = { 0 };
3780 unsigned int words = bytes >> 1;
3781 unsigned int i;
3782
3783 if (words) /* warning if bytes > 1 */
3784 ata_dev_printk(qc->dev, KERN_WARNING,
3785 "%u bytes trailing data\n", bytes);
3786
3787 for (i = 0; i < words; i++)
3788 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3789
3790 ap->hsm_task_state = HSM_ST_LAST;
3791 return;
3792 }
3793
3794 sg = &qc->__sg[qc->cursg];
3795
3796 page = sg->page;
3797 offset = sg->offset + qc->cursg_ofs;
3798
3799 /* get the current page and offset */
3800 page = nth_page(page, (offset >> PAGE_SHIFT));
3801 offset %= PAGE_SIZE;
3802
3803 /* don't overrun current sg */
3804 count = min(sg->length - qc->cursg_ofs, bytes);
3805
3806 /* don't cross page boundaries */
3807 count = min(count, (unsigned int)PAGE_SIZE - offset);
3808
3809 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3810
3811 if (PageHighMem(page)) {
3812 unsigned long flags;
3813
3814 /* FIXME: use bounce buffer */
3815 local_irq_save(flags);
3816 buf = kmap_atomic(page, KM_IRQ0);
3817
3818 /* do the actual data transfer */
3819 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3820
3821 kunmap_atomic(buf, KM_IRQ0);
3822 local_irq_restore(flags);
3823 } else {
3824 buf = page_address(page);
3825 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3826 }
3827
3828 bytes -= count;
3829 qc->curbytes += count;
3830 qc->cursg_ofs += count;
3831
3832 if (qc->cursg_ofs == sg->length) {
3833 qc->cursg++;
3834 qc->cursg_ofs = 0;
3835 }
3836
3837 if (bytes)
3838 goto next_sg;
3839}
3840
3841/**
3842 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3843 * @qc: Command in progress
3844 *
3845 * Transfer data from/to the ATAPI device.
3846 *
3847 * LOCKING:
3848 * Inherited from caller.
3849 */
3850
3851static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3852{
3853 struct ata_port *ap = qc->ap;
3854 struct ata_device *dev = qc->dev;
3855 unsigned int ireason, bc_lo, bc_hi, bytes;
3856 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3857
3858 /* Abuse qc->result_tf for temp storage of intermediate TF
3859 * here to save some kernel stack usage.
3860 * For normal completion, qc->result_tf is not relevant. For
3861 * error, qc->result_tf is later overwritten by ata_qc_complete().
3862 * So, the correctness of qc->result_tf is not affected.
3863 */
3864 ap->ops->tf_read(ap, &qc->result_tf);
3865 ireason = qc->result_tf.nsect;
3866 bc_lo = qc->result_tf.lbam;
3867 bc_hi = qc->result_tf.lbah;
3868 bytes = (bc_hi << 8) | bc_lo;
3869
3870 /* shall be cleared to zero, indicating xfer of data */
3871 if (ireason & (1 << 0))
3872 goto err_out;
3873
3874 /* make sure transfer direction matches expected */
3875 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3876 if (do_write != i_write)
3877 goto err_out;
3878
3879 VPRINTK("ata%u: transferring %d bytes\n", ap->id, bytes);
3880
3881 __atapi_pio_bytes(qc, bytes);
3882
3883 return;
3884
3885err_out:
3886 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3887 qc->err_mask |= AC_ERR_HSM;
3888 ap->hsm_task_state = HSM_ST_ERR;
3889}
3890
3891/**
3892 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3893 * @ap: the target ata_port
3894 * @qc: qc in progress
3895 *
3896 * RETURNS:
3897 * 1 if ok in workqueue, 0 otherwise.
3898 */
3899
3900static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3901{
3902 if (qc->tf.flags & ATA_TFLAG_POLLING)
3903 return 1;
3904
3905 if (ap->hsm_task_state == HSM_ST_FIRST) {
3906 if (qc->tf.protocol == ATA_PROT_PIO &&
3907 (qc->tf.flags & ATA_TFLAG_WRITE))
3908 return 1;
3909
3910 if (is_atapi_taskfile(&qc->tf) &&
3911 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3912 return 1;
3913 }
3914
3915 return 0;
3916}
3917
3918/**
3919 * ata_hsm_qc_complete - finish a qc running on standard HSM
3920 * @qc: Command to complete
3921 * @in_wq: 1 if called from workqueue, 0 otherwise
3922 *
3923 * Finish @qc which is running on standard HSM.
3924 *
3925 * LOCKING:
3926 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3927 * Otherwise, none on entry and grabs host lock.
3928 */
3929static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3930{
3931 struct ata_port *ap = qc->ap;
3932 unsigned long flags;
3933
3934 if (ap->ops->error_handler) {
3935 if (in_wq) {
3936 spin_lock_irqsave(ap->lock, flags);
3937
3938 /* EH might have kicked in while host_set lock
3939 * is released.
3940 */
3941 qc = ata_qc_from_tag(ap, qc->tag);
3942 if (qc) {
3943 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3944 ata_irq_on(ap);
3945 ata_qc_complete(qc);
3946 } else
3947 ata_port_freeze(ap);
3948 }
3949
3950 spin_unlock_irqrestore(ap->lock, flags);
3951 } else {
3952 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3953 ata_qc_complete(qc);
3954 else
3955 ata_port_freeze(ap);
3956 }
3957 } else {
3958 if (in_wq) {
3959 spin_lock_irqsave(ap->lock, flags);
3960 ata_irq_on(ap);
3961 ata_qc_complete(qc);
3962 spin_unlock_irqrestore(ap->lock, flags);
3963 } else
3964 ata_qc_complete(qc);
3965 }
3966
3967 ata_altstatus(ap); /* flush */
3968}
3969
3970/**
3971 * ata_hsm_move - move the HSM to the next state.
3972 * @ap: the target ata_port
3973 * @qc: qc in progress
3974 * @status: current device status
3975 * @in_wq: 1 if called from workqueue, 0 otherwise
3976 *
3977 * RETURNS:
3978 * 1 when poll next status needed, 0 otherwise.
3979 */
3980int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3981 u8 status, int in_wq)
3982{
3983 unsigned long flags = 0;
3984 int poll_next;
3985
3986 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3987
3988 /* Make sure ata_qc_issue_prot() does not throw things
3989 * like DMA polling into the workqueue. Notice that
3990 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3991 */
3992 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3993
3994fsm_start:
3995 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3996 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3997
3998 switch (ap->hsm_task_state) {
3999 case HSM_ST_FIRST:
4000 /* Send first data block or PACKET CDB */
4001
4002 /* If polling, we will stay in the work queue after
4003 * sending the data. Otherwise, interrupt handler
4004 * takes over after sending the data.
4005 */
4006 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4007
4008 /* check device status */
4009 if (unlikely((status & ATA_DRQ) == 0)) {
4010 /* handle BSY=0, DRQ=0 as error */
4011 if (likely(status & (ATA_ERR | ATA_DF)))
4012 /* device stops HSM for abort/error */
4013 qc->err_mask |= AC_ERR_DEV;
4014 else
4015 /* HSM violation. Let EH handle this */
4016 qc->err_mask |= AC_ERR_HSM;
4017
4018 ap->hsm_task_state = HSM_ST_ERR;
4019 goto fsm_start;
4020 }
4021
4022 /* Device should not ask for data transfer (DRQ=1)
4023 * when it finds something wrong.
4024 * We ignore DRQ here and stop the HSM by
4025 * changing hsm_task_state to HSM_ST_ERR and
4026 * let the EH abort the command or reset the device.
4027 */
4028 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4029 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4030 ap->id, status);
4031 qc->err_mask |= AC_ERR_HSM;
4032 ap->hsm_task_state = HSM_ST_ERR;
4033 goto fsm_start;
4034 }
4035
4036 /* Send the CDB (atapi) or the first data block (ata pio out).
4037 * During the state transition, interrupt handler shouldn't
4038 * be invoked before the data transfer is complete and
4039 * hsm_task_state is changed. Hence, the following locking.
4040 */
4041 if (in_wq)
4042 spin_lock_irqsave(ap->lock, flags);
4043
4044 if (qc->tf.protocol == ATA_PROT_PIO) {
4045 /* PIO data out protocol.
4046 * send first data block.
4047 */
4048
4049 /* ata_pio_sectors() might change the state
4050 * to HSM_ST_LAST. so, the state is changed here
4051 * before ata_pio_sectors().
4052 */
4053 ap->hsm_task_state = HSM_ST;
4054 ata_pio_sectors(qc);
4055 ata_altstatus(ap); /* flush */
4056 } else
4057 /* send CDB */
4058 atapi_send_cdb(ap, qc);
4059
4060 if (in_wq)
4061 spin_unlock_irqrestore(ap->lock, flags);
4062
4063 /* if polling, ata_pio_task() handles the rest.
4064 * otherwise, interrupt handler takes over from here.
4065 */
4066 break;
4067
4068 case HSM_ST:
4069 /* complete command or read/write the data register */
4070 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4071 /* ATAPI PIO protocol */
4072 if ((status & ATA_DRQ) == 0) {
4073 /* No more data to transfer or device error.
4074 * Device error will be tagged in HSM_ST_LAST.
4075 */
4076 ap->hsm_task_state = HSM_ST_LAST;
4077 goto fsm_start;
4078 }
4079
4080 /* Device should not ask for data transfer (DRQ=1)
4081 * when it finds something wrong.
4082 * We ignore DRQ here and stop the HSM by
4083 * changing hsm_task_state to HSM_ST_ERR and
4084 * let the EH abort the command or reset the device.
4085 */
4086 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4087 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4088 ap->id, status);
4089 qc->err_mask |= AC_ERR_HSM;
4090 ap->hsm_task_state = HSM_ST_ERR;
4091 goto fsm_start;
4092 }
4093
4094 atapi_pio_bytes(qc);
4095
4096 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4097 /* bad ireason reported by device */
4098 goto fsm_start;
4099
4100 } else {
4101 /* ATA PIO protocol */
4102 if (unlikely((status & ATA_DRQ) == 0)) {
4103 /* handle BSY=0, DRQ=0 as error */
4104 if (likely(status & (ATA_ERR | ATA_DF)))
4105 /* device stops HSM for abort/error */
4106 qc->err_mask |= AC_ERR_DEV;
4107 else
4108 /* HSM violation. Let EH handle this */
4109 qc->err_mask |= AC_ERR_HSM;
4110
4111 ap->hsm_task_state = HSM_ST_ERR;
4112 goto fsm_start;
4113 }
4114
4115 /* For PIO reads, some devices may ask for
4116 * data transfer (DRQ=1) along with ERR=1.
4117 * We respect DRQ here and transfer one
4118 * block of junk data before changing the
4119 * hsm_task_state to HSM_ST_ERR.
4120 *
4121 * For PIO writes, ERR=1 DRQ=1 doesn't make
4122 * sense since the data block has been
4123 * transferred to the device.
4124 */
4125 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4126 /* data might be corrupted */
4127 qc->err_mask |= AC_ERR_DEV;
4128
4129 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4130 ata_pio_sectors(qc);
4131 ata_altstatus(ap);
4132 status = ata_wait_idle(ap);
4133 }
4134
4135 if (status & (ATA_BUSY | ATA_DRQ))
4136 qc->err_mask |= AC_ERR_HSM;
4137
4138 /* ata_pio_sectors() might change the
4139 * state to HSM_ST_LAST. so, the state
4140 * is changed after ata_pio_sectors().
4141 */
4142 ap->hsm_task_state = HSM_ST_ERR;
4143 goto fsm_start;
4144 }
4145
4146 ata_pio_sectors(qc);
4147
4148 if (ap->hsm_task_state == HSM_ST_LAST &&
4149 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4150 /* all data read */
4151 ata_altstatus(ap);
4152 status = ata_wait_idle(ap);
4153 goto fsm_start;
4154 }
4155 }
4156
4157 ata_altstatus(ap); /* flush */
4158 poll_next = 1;
4159 break;
4160
4161 case HSM_ST_LAST:
4162 if (unlikely(!ata_ok(status))) {
4163 qc->err_mask |= __ac_err_mask(status);
4164 ap->hsm_task_state = HSM_ST_ERR;
4165 goto fsm_start;
4166 }
4167
4168 /* no more data to transfer */
4169 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4170 ap->id, qc->dev->devno, status);
4171
4172 WARN_ON(qc->err_mask);
4173
4174 ap->hsm_task_state = HSM_ST_IDLE;
4175
4176 /* complete taskfile transaction */
4177 ata_hsm_qc_complete(qc, in_wq);
4178
4179 poll_next = 0;
4180 break;
4181
4182 case HSM_ST_ERR:
4183 /* make sure qc->err_mask is available to
4184 * know what's wrong and recover
4185 */
4186 WARN_ON(qc->err_mask == 0);
4187
4188 ap->hsm_task_state = HSM_ST_IDLE;
4189
4190 /* complete taskfile transaction */
4191 ata_hsm_qc_complete(qc, in_wq);
4192
4193 poll_next = 0;
4194 break;
4195 default:
4196 poll_next = 0;
4197 BUG();
4198 }
4199
4200 return poll_next;
4201}
4202
4203static void ata_pio_task(void *_data)
4204{
4205 struct ata_queued_cmd *qc = _data;
4206 struct ata_port *ap = qc->ap;
4207 u8 status;
4208 int poll_next;
4209
4210fsm_start:
4211 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4212
4213 /*
4214 * This is purely heuristic. This is a fast path.
4215 * Sometimes when we enter, BSY will be cleared in
4216 * a chk-status or two. If not, the drive is probably seeking
4217 * or something. Snooze for a couple msecs, then
4218 * chk-status again. If still busy, queue delayed work.
4219 */
4220 status = ata_busy_wait(ap, ATA_BUSY, 5);
4221 if (status & ATA_BUSY) {
4222 msleep(2);
4223 status = ata_busy_wait(ap, ATA_BUSY, 10);
4224 if (status & ATA_BUSY) {
4225 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4226 return;
4227 }
4228 }
4229
4230 /* move the HSM */
4231 poll_next = ata_hsm_move(ap, qc, status, 1);
4232
4233 /* another command or interrupt handler
4234 * may be running at this point.
4235 */
4236 if (poll_next)
4237 goto fsm_start;
4238}
4239
4240/**
4241 * ata_qc_new - Request an available ATA command, for queueing
4242 * @ap: Port from which we request an available command
4243 * structure
4244 *
4245 * LOCKING:
4246 * None.
4247 */
4248
4249static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4250{
4251 struct ata_queued_cmd *qc = NULL;
4252 unsigned int i;
4253
4254 /* no command while frozen */
4255 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4256 return NULL;
4257
4258 /* the last tag is reserved for internal command. */
4259 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4260 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4261 qc = __ata_qc_from_tag(ap, i);
4262 break;
4263 }
4264
4265 if (qc)
4266 qc->tag = i;
4267
4268 return qc;
4269}
4270
4271/**
4272 * ata_qc_new_init - Request an available ATA command, and initialize it
4273 * @dev: Device from which we request an available command structure
4274 *
4275 * LOCKING:
4276 * None.
4277 */
4278
4279struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4280{
4281 struct ata_port *ap = dev->ap;
4282 struct ata_queued_cmd *qc;
4283
4284 qc = ata_qc_new(ap);
4285 if (qc) {
4286 qc->scsicmd = NULL;
4287 qc->ap = ap;
4288 qc->dev = dev;
4289
4290 ata_qc_reinit(qc);
4291 }
4292
4293 return qc;
4294}
4295
4296/**
4297 * ata_qc_free - free unused ata_queued_cmd
4298 * @qc: Command to complete
4299 *
4300 * Designed to free an unused ata_queued_cmd object
4301 * in case something prevents it from being used.
4302 *
4303 * LOCKING:
4304 * spin_lock_irqsave(host_set lock)
4305 */
4306void ata_qc_free(struct ata_queued_cmd *qc)
4307{
4308 struct ata_port *ap = qc->ap;
4309 unsigned int tag;
4310
4311 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4312
4313 qc->flags = 0;
4314 tag = qc->tag;
4315 if (likely(ata_tag_valid(tag))) {
4316 qc->tag = ATA_TAG_POISON;
4317 clear_bit(tag, &ap->qc_allocated);
4318 }
4319}
4320
4321void __ata_qc_complete(struct ata_queued_cmd *qc)
4322{
4323 struct ata_port *ap = qc->ap;
4324
4325 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4326 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4327
4328 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4329 ata_sg_clean(qc);
4330
4331 /* command should be marked inactive atomically with qc completion */
4332 if (qc->tf.protocol == ATA_PROT_NCQ)
4333 ap->sactive &= ~(1 << qc->tag);
4334 else
4335 ap->active_tag = ATA_TAG_POISON;
4336
4337 /* atapi: mark qc as inactive to prevent the interrupt handler
4338 * from completing the command twice later, before the error handler
4339 * is called. (when rc != 0 and atapi request sense is needed)
4340 */
4341 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4342 ap->qc_active &= ~(1 << qc->tag);
4343
4344 /* call completion callback */
4345 qc->complete_fn(qc);
4346}
4347
4348/**
4349 * ata_qc_complete - Complete an active ATA command
4350 * @qc: Command to complete
4352 *
4353 * Indicate to the mid and upper layers that an ATA
4354 * command has completed, with either an ok or not-ok status.
4355 *
4356 * LOCKING:
4357 * spin_lock_irqsave(host_set lock)
4358 */
4359void ata_qc_complete(struct ata_queued_cmd *qc)
4360{
4361 struct ata_port *ap = qc->ap;
4362
4363 /* XXX: New EH and old EH use different mechanisms to
4364 * synchronize EH with regular execution path.
4365 *
4366 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4367 * Normal execution path is responsible for not accessing a
4368 * failed qc. libata core enforces the rule by returning NULL
4369 * from ata_qc_from_tag() for failed qcs.
4370 *
4371 * Old EH depends on ata_qc_complete() nullifying completion
4372 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4373 * not synchronize with interrupt handler. Only PIO task is
4374 * taken care of.
4375 */
4376 if (ap->ops->error_handler) {
4377 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4378
4379 if (unlikely(qc->err_mask))
4380 qc->flags |= ATA_QCFLAG_FAILED;
4381
4382 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4383 if (!ata_tag_internal(qc->tag)) {
4384 /* always fill result TF for failed qc */
4385 ap->ops->tf_read(ap, &qc->result_tf);
4386 ata_qc_schedule_eh(qc);
4387 return;
4388 }
4389 }
4390
4391 /* read result TF if requested */
4392 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4393 ap->ops->tf_read(ap, &qc->result_tf);
4394
4395 __ata_qc_complete(qc);
4396 } else {
4397 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4398 return;
4399
4400 /* read result TF if failed or requested */
4401 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4402 ap->ops->tf_read(ap, &qc->result_tf);
4403
4404 __ata_qc_complete(qc);
4405 }
4406}
4407
4408/**
4409 * ata_qc_complete_multiple - Complete multiple qcs successfully
4410 * @ap: port in question
4411 * @qc_active: new qc_active mask
4412 * @finish_qc: LLDD callback invoked before completing a qc
4413 *
4414 * Complete in-flight commands. This function is meant to be
4415 * called from a low-level driver's interrupt routine to complete
4416 * requests normally. ap->qc_active and @qc_active are compared
4417 * and commands are completed accordingly.
4418 *
4419 * LOCKING:
4420 * spin_lock_irqsave(host_set lock)
4421 *
4422 * RETURNS:
4423 * Number of completed commands on success, -errno otherwise.
4424 */
4425int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4426 void (*finish_qc)(struct ata_queued_cmd *))
4427{
4428 int nr_done = 0;
4429 u32 done_mask;
4430 int i;
4431
4432 done_mask = ap->qc_active ^ qc_active;
4433
4434 if (unlikely(done_mask & qc_active)) {
4435 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4436 "(%08x->%08x)\n", ap->qc_active, qc_active);
4437 return -EINVAL;
4438 }
4439
4440 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4441 struct ata_queued_cmd *qc;
4442
4443 if (!(done_mask & (1 << i)))
4444 continue;
4445
4446 if ((qc = ata_qc_from_tag(ap, i))) {
4447 if (finish_qc)
4448 finish_qc(qc);
4449 ata_qc_complete(qc);
4450 nr_done++;
4451 }
4452 }
4453
4454 return nr_done;
4455}
4456
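/* Illustrative sketch: the expected caller is an NCQ-capable LLDD's
 * interrupt routine, which derives the new active-tag mask from its
 * hardware and passes it in. Reading ap->sactive below merely stands
 * in for a controller register read; both helper names are
 * hypothetical.
 */
static u32 example_read_done_tags(struct ata_port *ap)
{
	/* a real driver would read e.g. its SActive shadow register */
	return ap->sactive;
}

static void example_sata_interrupt(struct ata_port *ap)
{
	u32 qc_active = example_read_done_tags(ap);

	/* completes every tag set in ap->qc_active but clear here */
	ata_qc_complete_multiple(ap, qc_active, NULL);
}
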
4457static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4458{
4459 struct ata_port *ap = qc->ap;
4460
4461 switch (qc->tf.protocol) {
4462 case ATA_PROT_NCQ:
4463 case ATA_PROT_DMA:
4464 case ATA_PROT_ATAPI_DMA:
4465 return 1;
4466
4467 case ATA_PROT_ATAPI:
4468 case ATA_PROT_PIO:
4469 if (ap->flags & ATA_FLAG_PIO_DMA)
4470 return 1;
4471
4472 /* fall through */
4473
4474 default:
4475 return 0;
4476 }
4477
4478 /* never reached */
4479}
4480
4481/**
4482 * ata_qc_issue - issue taskfile to device
4483 * @qc: command to issue to device
4484 *
4485 * Prepare an ATA command for submission to the device.
4486 * This includes mapping the data into a DMA-able
4487 * area, filling in the S/G table, and finally
4488 * writing the taskfile to hardware, starting the command.
4489 *
4490 * LOCKING:
4491 * spin_lock_irqsave(host_set lock)
4492 */
4493void ata_qc_issue(struct ata_queued_cmd *qc)
4494{
4495 struct ata_port *ap = qc->ap;
4496
4497 /* Make sure only one non-NCQ command is outstanding. The
4498 * check is skipped for old EH because it reuses active qc to
4499 * request ATAPI sense.
4500 */
4501 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4502
4503 if (qc->tf.protocol == ATA_PROT_NCQ) {
4504 WARN_ON(ap->sactive & (1 << qc->tag));
4505 ap->sactive |= 1 << qc->tag;
4506 } else {
4507 WARN_ON(ap->sactive);
4508 ap->active_tag = qc->tag;
4509 }
4510
4511 qc->flags |= ATA_QCFLAG_ACTIVE;
4512 ap->qc_active |= 1 << qc->tag;
4513
4514 if (ata_should_dma_map(qc)) {
4515 if (qc->flags & ATA_QCFLAG_SG) {
4516 if (ata_sg_setup(qc))
4517 goto sg_err;
4518 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4519 if (ata_sg_setup_one(qc))
4520 goto sg_err;
4521 }
4522 } else {
4523 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4524 }
4525
4526 ap->ops->qc_prep(qc);
4527
4528 qc->err_mask |= ap->ops->qc_issue(qc);
4529 if (unlikely(qc->err_mask))
4530 goto err;
4531 return;
4532
4533sg_err:
4534 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4535 qc->err_mask |= AC_ERR_SYSTEM;
4536err:
4537 ata_qc_complete(qc);
4538}
4539
4540/**
4541 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4542 * @qc: command to issue to device
4543 *
4544 * Using various libata functions and hooks, this function
4545 * starts an ATA command. ATA commands are grouped into
4546 * classes called "protocols", and issuing each type of protocol
4547 * is slightly different.
4548 *
4549 * May be used as the qc_issue() entry in ata_port_operations.
4550 *
4551 * LOCKING:
4552 * spin_lock_irqsave(host_set lock)
4553 *
4554 * RETURNS:
4555 * Zero on success, AC_ERR_* mask on failure
4556 */
4557
4558unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4559{
4560 struct ata_port *ap = qc->ap;
4561
4562 /* Use polling PIO if the LLD doesn't handle
4563 * interrupt-driven PIO and ATAPI CDB interrupts.
4564 */
4565 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4566 switch (qc->tf.protocol) {
4567 case ATA_PROT_PIO:
4568 case ATA_PROT_ATAPI:
4569 case ATA_PROT_ATAPI_NODATA:
4570 qc->tf.flags |= ATA_TFLAG_POLLING;
4571 break;
4572 case ATA_PROT_ATAPI_DMA:
4573 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4574 /* see ata_dma_blacklisted() */
4575 BUG();
4576 break;
4577 default:
4578 break;
4579 }
4580 }
4581
4582 /* select the device */
4583 ata_dev_select(ap, qc->dev->devno, 1, 0);
4584
4585 /* start the command */
4586 switch (qc->tf.protocol) {
4587 case ATA_PROT_NODATA:
4588 if (qc->tf.flags & ATA_TFLAG_POLLING)
4589 ata_qc_set_polling(qc);
4590
4591 ata_tf_to_host(ap, &qc->tf);
4592 ap->hsm_task_state = HSM_ST_LAST;
4593
4594 if (qc->tf.flags & ATA_TFLAG_POLLING)
4595 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4596
4597 break;
4598
4599 case ATA_PROT_DMA:
4600 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4601
4602 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4603 ap->ops->bmdma_setup(qc); /* set up bmdma */
4604 ap->ops->bmdma_start(qc); /* initiate bmdma */
4605 ap->hsm_task_state = HSM_ST_LAST;
4606 break;
4607
4608 case ATA_PROT_PIO:
4609 if (qc->tf.flags & ATA_TFLAG_POLLING)
4610 ata_qc_set_polling(qc);
4611
4612 ata_tf_to_host(ap, &qc->tf);
4613
4614 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4615 /* PIO data out protocol */
4616 ap->hsm_task_state = HSM_ST_FIRST;
4617 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4618
4619 /* always send first data block using
4620 * the ata_pio_task() codepath.
4621 */
4622 } else {
4623 /* PIO data in protocol */
4624 ap->hsm_task_state = HSM_ST;
4625
4626 if (qc->tf.flags & ATA_TFLAG_POLLING)
4627 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4628
4629 /* if polling, ata_pio_task() handles the rest.
4630 * otherwise, interrupt handler takes over from here.
4631 */
4632 }
4633
4634 break;
4635
4636 case ATA_PROT_ATAPI:
4637 case ATA_PROT_ATAPI_NODATA:
4638 if (qc->tf.flags & ATA_TFLAG_POLLING)
4639 ata_qc_set_polling(qc);
4640
4641 ata_tf_to_host(ap, &qc->tf);
4642
4643 ap->hsm_task_state = HSM_ST_FIRST;
4644
4645 /* send cdb by polling if no cdb interrupt */
4646 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4647 (qc->tf.flags & ATA_TFLAG_POLLING))
4648 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4649 break;
4650
4651 case ATA_PROT_ATAPI_DMA:
4652 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4653
4654 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4655 ap->ops->bmdma_setup(qc); /* set up bmdma */
4656 ap->hsm_task_state = HSM_ST_FIRST;
4657
4658 /* send cdb by polling if no cdb interrupt */
4659 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4660 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4661 break;
4662
4663 default:
4664 WARN_ON(1);
4665 return AC_ERR_SYSTEM;
4666 }
4667
4668 return 0;
4669}
4670
4671/**
4672 * ata_host_intr - Handle host interrupt for given (port, task)
4673 * @ap: Port on which interrupt arrived (possibly...)
4674 * @qc: Taskfile currently active in engine
4675 *
4676 * Handle host interrupt for given queued command. Currently,
4677 * only DMA interrupts are handled. All other commands are
4678 * handled via polling with interrupts disabled (nIEN bit).
4679 *
4680 * LOCKING:
4681 * spin_lock_irqsave(host_set lock)
4682 *
4683 * RETURNS:
4684 * One if interrupt was handled, zero if not (shared irq).
4685 */
4686
4687inline unsigned int ata_host_intr (struct ata_port *ap,
4688 struct ata_queued_cmd *qc)
4689{
4690 u8 status, host_stat = 0;
4691
4692 VPRINTK("ata%u: protocol %d task_state %d\n",
4693 ap->id, qc->tf.protocol, ap->hsm_task_state);
4694
4695 /* Check whether we are expecting interrupt in this state */
4696 switch (ap->hsm_task_state) {
4697 case HSM_ST_FIRST:
4698 /* Some pre-ATAPI-4 devices assert INTRQ
4699 * at this state when ready to receive CDB.
4700 */
4701
4702 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4703 * The flag was turned on only for ATAPI devices.
4704 * No need to check is_atapi_taskfile(&qc->tf) again.
4705 */
4706 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4707 goto idle_irq;
4708 break;
4709 case HSM_ST_LAST:
4710 if (qc->tf.protocol == ATA_PROT_DMA ||
4711 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4712 /* check status of DMA engine */
4713 host_stat = ap->ops->bmdma_status(ap);
4714 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4715
4716 /* if it's not our irq... */
4717 if (!(host_stat & ATA_DMA_INTR))
4718 goto idle_irq;
4719
4720 /* before we do anything else, clear DMA-Start bit */
4721 ap->ops->bmdma_stop(qc);
4722
4723 if (unlikely(host_stat & ATA_DMA_ERR)) {
4724 /* error when transferring data to/from memory */
4725 qc->err_mask |= AC_ERR_HOST_BUS;
4726 ap->hsm_task_state = HSM_ST_ERR;
4727 }
4728 }
4729 break;
4730 case HSM_ST:
4731 break;
4732 default:
4733 goto idle_irq;
4734 }
4735
4736 /* check altstatus */
4737 status = ata_altstatus(ap);
4738 if (status & ATA_BUSY)
4739 goto idle_irq;
4740
4741 /* check main status, clearing INTRQ */
4742 status = ata_chk_status(ap);
4743 if (unlikely(status & ATA_BUSY))
4744 goto idle_irq;
4745
4746 /* ack bmdma irq events */
4747 ap->ops->irq_clear(ap);
4748
4749 ata_hsm_move(ap, qc, status, 0);
4750 return 1; /* irq handled */
4751
4752idle_irq:
4753 ap->stats.idle_irq++;
4754
4755#ifdef ATA_IRQ_TRAP
4756 if ((ap->stats.idle_irq % 1000) == 0) {
4757 ata_irq_ack(ap, 0); /* debug trap */
4758 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4759 return 1;
4760 }
4761#endif
4762 return 0; /* irq not handled */
4763}
4764
4765/**
4766 * ata_interrupt - Default ATA host interrupt handler
4767 * @irq: irq line (unused)
4768 * @dev_instance: pointer to our ata_host_set information structure
4769 * @regs: unused
4770 *
4771 * Default interrupt handler for PCI IDE devices. Calls
4772 * ata_host_intr() for each port that is not disabled.
4773 *
4774 * LOCKING:
4775 * Obtains host_set lock during operation.
4776 *
4777 * RETURNS:
4778 * IRQ_NONE or IRQ_HANDLED.
4779 */
4780
4781irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4782{
4783 struct ata_host_set *host_set = dev_instance;
4784 unsigned int i;
4785 unsigned int handled = 0;
4786 unsigned long flags;
4787
4788 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4789 spin_lock_irqsave(&host_set->lock, flags);
4790
4791 for (i = 0; i < host_set->n_ports; i++) {
4792 struct ata_port *ap;
4793
4794 ap = host_set->ports[i];
4795 if (ap &&
4796 !(ap->flags & ATA_FLAG_DISABLED)) {
4797 struct ata_queued_cmd *qc;
4798
4799 qc = ata_qc_from_tag(ap, ap->active_tag);
4800 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4801 (qc->flags & ATA_QCFLAG_ACTIVE))
4802 handled |= ata_host_intr(ap, qc);
4803 }
4804 }
4805
4806 spin_unlock_irqrestore(&host_set->lock, flags);
4807
4808 return IRQ_RETVAL(handled);
4809}
4810
4811/**
4812 * sata_scr_valid - test whether SCRs are accessible
4813 * @ap: ATA port to test SCR accessibility for
4814 *
4815 * Test whether SCRs are accessible for @ap.
4816 *
4817 * LOCKING:
4818 * None.
4819 *
4820 * RETURNS:
4821 * 1 if SCRs are accessible, 0 otherwise.
4822 */
4823int sata_scr_valid(struct ata_port *ap)
4824{
4825 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4826}
4827
4828/**
4829 * sata_scr_read - read SCR register of the specified port
4830 * @ap: ATA port to read SCR for
4831 * @reg: SCR to read
4832 * @val: Place to store read value
4833 *
4834 * Read SCR register @reg of @ap into *@val. This function is
4835 * guaranteed to succeed if the cable type of the port is SATA
4836 * and the port implements ->scr_read.
4837 *
4838 * LOCKING:
4839 * None.
4840 *
4841 * RETURNS:
4842 * 0 on success, negative errno on failure.
4843 */
4844int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4845{
4846 if (sata_scr_valid(ap)) {
4847 *val = ap->ops->scr_read(ap, reg);
4848 return 0;
4849 }
4850 return -EOPNOTSUPP;
4851}
4852
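/* Illustrative sketch: decoding SStatus after a successful
 * sata_scr_read(). DET (bits 3:0) is 0x3 when a device is present
 * and PHY communication is established; SPD (bits 7:4) holds the
 * negotiated link speed. The helper name is hypothetical.
 */
static void example_print_link_status(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
		DPRINTK("SStatus 0x%x: DET 0x%x SPD 0x%x\n",
			sstatus, sstatus & 0xf, (sstatus >> 4) & 0xf);
}
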
4853/**
4854 * sata_scr_write - write SCR register of the specified port
4855 * @ap: ATA port to write SCR for
4856 * @reg: SCR to write
4857 * @val: value to write
4858 *
4859 * Write @val to SCR register @reg of @ap. This function is
4860 * guaranteed to succeed if the cable type of the port is SATA
4861 * and the port implements ->scr_read.
4862 *
4863 * LOCKING:
4864 * None.
4865 *
4866 * RETURNS:
4867 * 0 on success, negative errno on failure.
4868 */
4869int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4870{
4871 if (sata_scr_valid(ap)) {
4872 ap->ops->scr_write(ap, reg, val);
4873 return 0;
4874 }
4875 return -EOPNOTSUPP;
4876}
4877
4878/**
4879 * sata_scr_write_flush - write SCR register of the specified port and flush
4880 * @ap: ATA port to write SCR for
4881 * @reg: SCR to write
4882 * @val: value to write
4883 *
4884 * This function is identical to sata_scr_write() except that this
4885 * function performs flush after writing to the register.
4886 *
4887 * LOCKING:
4888 * None.
4889 *
4890 * RETURNS:
4891 * 0 on success, negative errno on failure.
4892 */
4893int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4894{
4895 if (sata_scr_valid(ap)) {
4896 ap->ops->scr_write(ap, reg, val);
4897 ap->ops->scr_read(ap, reg);
4898 return 0;
4899 }
4900 return -EOPNOTSUPP;
4901}
4902
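/* Illustrative sketch: one use of sata_scr_write_flush() is capping
 * the allowed link speed via the SPD field (bits 7:4) of SControl
 * ahead of a reset; SPD = 0x1 requests a 1.5 Gbps limit. The helper
 * name is hypothetical.
 */
static int example_limit_link_to_gen1(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	scontrol = (scontrol & ~0xf0) | (0x1 << 4);
	return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
}
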
4903/**
4904 * ata_port_online - test whether the given port is online
4905 * @ap: ATA port to test
4906 *
4907 * Test whether @ap is online. Note that this function returns 0
4908 * if online status of @ap cannot be obtained, so
4909 * ata_port_online(ap) != !ata_port_offline(ap).
4910 *
4911 * LOCKING:
4912 * None.
4913 *
4914 * RETURNS:
4915 * 1 if the port online status is available and online.
4916 */
4917int ata_port_online(struct ata_port *ap)
4918{
4919 u32 sstatus;
4920
4921 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4922 return 1;
4923 return 0;
4924}
4925
4926/**
4927 * ata_port_offline - test whether the given port is offline
4928 * @ap: ATA port to test
4929 *
4930 * Test whether @ap is offline. Note that this function returns
4931 * 0 if offline status of @ap cannot be obtained, so
4932 * ata_port_online(ap) != !ata_port_offline(ap).
4933 *
4934 * LOCKING:
4935 * None.
4936 *
4937 * RETURNS:
4938 * 1 if the port offline status is available and offline.
4939 */
4940int ata_port_offline(struct ata_port *ap)
4941{
4942 u32 sstatus;
4943
4944 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4945 return 1;
4946 return 0;
4947}
4948
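/* Illustrative sketch of the asymmetry documented above: on a port
 * without accessible SCRs (e.g. PATA) both helpers return 0, so
 * "not online" must not be treated as "offline". Hypothetical
 * helper.
 */
static int example_link_state(struct ata_port *ap)
{
	if (ata_port_online(ap))
		return 1;	/* known online */
	if (ata_port_offline(ap))
		return -1;	/* known offline */
	return 0;		/* unknown, SCRs not accessible */
}
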
4949/*
4950 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4951 * without filling in any other registers
4952 */
4953static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4954{
4955 struct ata_taskfile tf;
4956 int err;
4957
4958 ata_tf_init(dev, &tf);
4959
4960 tf.command = cmd;
4961 tf.flags |= ATA_TFLAG_DEVICE;
4962 tf.protocol = ATA_PROT_NODATA;
4963
4964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4965 if (err)
4966 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4967 __FUNCTION__, err);
4968
4969 return err;
4970}
4971
4972static int ata_flush_cache(struct ata_device *dev)
4973{
4974 u8 cmd;
4975
4976 if (!ata_try_flush_cache(dev))
4977 return 0;
4978
4979 if (ata_id_has_flush_ext(dev->id))
4980 cmd = ATA_CMD_FLUSH_EXT;
4981 else
4982 cmd = ATA_CMD_FLUSH;
4983
4984 return ata_do_simple_cmd(dev, cmd);
4985}
4986
4987static int ata_standby_drive(struct ata_device *dev)
4988{
4989 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4990}
4991
4992static int ata_start_drive(struct ata_device *dev)
4993{
4994 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4995}
4996
4997/**
4998 * ata_device_resume - wake up a previously suspended device
4999 * @dev: the device to resume
5000 *
5001 * Kick the drive back into action by sending it an IDLE IMMEDIATE
5002 * command and making sure its transfer mode matches between drive
5003 * and host.
5004 *
5005 */
5006int ata_device_resume(struct ata_device *dev)
5007{
5008 struct ata_port *ap = dev->ap;
5009
5010 if (ap->flags & ATA_FLAG_SUSPENDED) {
5011 struct ata_device *failed_dev;
5012
5013 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
5014 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
5015
5016 ap->flags &= ~ATA_FLAG_SUSPENDED;
5017 while (ata_set_mode(ap, &failed_dev))
5018 ata_dev_disable(failed_dev);
5019 }
5020 if (!ata_dev_enabled(dev))
5021 return 0;
5022 if (dev->class == ATA_DEV_ATA)
5023 ata_start_drive(dev);
5024
5025 return 0;
5026}
5027
5028/**
5029 * ata_device_suspend - prepare a device for suspend
5030 * @dev: the device to suspend
5031 * @state: target power management state
5032 *
5033 * Flush the cache on the drive, if appropriate, then issue a
5034 * standbynow command.
5035 */
5036int ata_device_suspend(struct ata_device *dev, pm_message_t state)
5037{
5038 struct ata_port *ap = dev->ap;
5039
5040 if (!ata_dev_enabled(dev))
5041 return 0;
5042 if (dev->class == ATA_DEV_ATA)
5043 ata_flush_cache(dev);
5044
5045 if (state.event != PM_EVENT_FREEZE)
5046 ata_standby_drive(dev);
5047 ap->flags |= ATA_FLAG_SUSPENDED;
5048 return 0;
5049}
5050
5051/**
5052 * ata_port_start - Set port up for DMA.
5053 * @ap: Port to initialize
5054 *
5055 * Called just after data structures for each port are
5056 * initialized. Allocates space for PRD table.
5057 *
5058 * May be used as the port_start() entry in ata_port_operations.
5059 *
5060 * LOCKING:
5061 * Inherited from caller.
5062 */
5063
5064int ata_port_start (struct ata_port *ap)
5065{
5066 struct device *dev = ap->dev;
5067 int rc;
5068
5069 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5070 if (!ap->prd)
5071 return -ENOMEM;
5072
5073 rc = ata_pad_alloc(ap, dev);
5074 if (rc) {
5075 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5076 return rc;
5077 }
5078
5079 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5080
5081 return 0;
5082}
5083
5084
5085/**
5086 * ata_port_stop - Undo ata_port_start()
5087 * @ap: Port to shut down
5088 *
5089 * Frees the PRD table.
5090 *
5091 * May be used as the port_stop() entry in ata_port_operations.
5092 *
5093 * LOCKING:
5094 * Inherited from caller.
5095 */
5096
5097void ata_port_stop (struct ata_port *ap)
5098{
5099 struct device *dev = ap->dev;
5100
5101 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5102 ata_pad_free(ap, dev);
5103}
5104
5105void ata_host_stop (struct ata_host_set *host_set)
5106{
5107 if (host_set->mmio_base)
5108 iounmap(host_set->mmio_base);
5109}
5110
5111
5112/**
5113 * ata_host_remove - Unregister SCSI host structure with upper layers
5114 * @ap: Port to unregister
5115 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5116 *
5117 * LOCKING:
5118 * Inherited from caller.
5119 */
5120
5121static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5122{
5123 struct Scsi_Host *sh = ap->host;
5124
5125 DPRINTK("ENTER\n");
5126
5127 if (do_unregister)
5128 scsi_remove_host(sh);
5129
5130 ap->ops->port_stop(ap);
5131}
5132
5133/**
5134 * ata_dev_init - Initialize an ata_device structure
5135 * @dev: Device structure to initialize
5136 *
5137 * Initialize @dev in preparation for probing.
5138 *
5139 * LOCKING:
5140 * Inherited from caller.
5141 */
5142void ata_dev_init(struct ata_device *dev)
5143{
5144 struct ata_port *ap = dev->ap;
5145 unsigned long flags;
5146
5147 /* SATA spd limit is bound to the first device */
5148 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5149
5150 /* High bits of dev->flags are used to record warm plug
5151 * requests which occur asynchronously. Synchronize using
5152 * host_set lock.
5153 */
5154 spin_lock_irqsave(ap->lock, flags);
5155 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5156 spin_unlock_irqrestore(ap->lock, flags);
5157
5158 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5159 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5160 dev->pio_mask = UINT_MAX;
5161 dev->mwdma_mask = UINT_MAX;
5162 dev->udma_mask = UINT_MAX;
5163}
5164
5165/**
5166 * ata_host_init - Initialize an ata_port structure
5167 * @ap: Structure to initialize
5168 * @host: associated SCSI mid-layer structure
5169 * @host_set: Collection of hosts to which @ap belongs
5170 * @ent: Probe information provided by low-level driver
5171 * @port_no: Port number associated with this ata_port
5172 *
5173 * Initialize a new ata_port structure, and its associated
5174 * scsi_host.
5175 *
5176 * LOCKING:
5177 * Inherited from caller.
5178 */
5179static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5180 struct ata_host_set *host_set,
5181 const struct ata_probe_ent *ent, unsigned int port_no)
5182{
5183 unsigned int i;
5184
5185 host->max_id = 16;
5186 host->max_lun = 1;
5187 host->max_channel = 1;
5188 host->unique_id = ata_unique_id++;
5189 host->max_cmd_len = 12;
5190
5191 ap->lock = &host_set->lock;
5192 ap->flags = ATA_FLAG_DISABLED;
5193 ap->id = host->unique_id;
5194 ap->host = host;
5195 ap->ctl = ATA_DEVCTL_OBS;
5196 ap->host_set = host_set;
5197 ap->dev = ent->dev;
5198 ap->port_no = port_no;
5199 ap->hard_port_no =
5200 ent->legacy_mode ? ent->hard_port_no : port_no;
5201 ap->pio_mask = ent->pio_mask;
5202 ap->mwdma_mask = ent->mwdma_mask;
5203 ap->udma_mask = ent->udma_mask;
5204 ap->flags |= ent->host_flags;
5205 ap->ops = ent->port_ops;
5206 ap->hw_sata_spd_limit = UINT_MAX;
5207 ap->active_tag = ATA_TAG_POISON;
5208 ap->last_ctl = 0xFF;
5209
5210#if defined(ATA_VERBOSE_DEBUG)
5211 /* turn on all debugging levels */
5212 ap->msg_enable = 0x00FF;
5213#elif defined(ATA_DEBUG)
5214 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5215#else
5216 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5217#endif
5218
5219 INIT_WORK(&ap->port_task, NULL, NULL);
5220 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5221 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5222 INIT_LIST_HEAD(&ap->eh_done_q);
5223 init_waitqueue_head(&ap->eh_wait_q);
5224
5225 /* set cable type */
5226 ap->cbl = ATA_CBL_NONE;
5227 if (ap->flags & ATA_FLAG_SATA)
5228 ap->cbl = ATA_CBL_SATA;
5229
5230 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5231 struct ata_device *dev = &ap->device[i];
5232 dev->ap = ap;
5233 dev->devno = i;
5234 ata_dev_init(dev);
5235 }
5236
5237#ifdef ATA_IRQ_TRAP
5238 ap->stats.unhandled_irq = 1;
5239 ap->stats.idle_irq = 1;
5240#endif
5241
5242 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5243}
5244
5245/**
5246 * ata_host_add - Attach low-level ATA driver to system
5247 * @ent: Information provided by low-level driver
5248 * @host_set: Collections of ports to which we add
5249 * @port_no: Port number associated with this host
5250 *
5251 * Attach low-level ATA driver to system.
5252 *
5253 * LOCKING:
5254 * PCI/etc. bus probe sem.
5255 *
5256 * RETURNS:
5257 * New ata_port on success, NULL on error.
5258 */
5259
5260static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
5261 struct ata_host_set *host_set,
5262 unsigned int port_no)
5263{
5264 struct Scsi_Host *host;
5265 struct ata_port *ap;
5266 int rc;
5267
5268 DPRINTK("ENTER\n");
5269
5270 if (!ent->port_ops->error_handler &&
5271 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5272 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5273 port_no);
5274 return NULL;
5275 }
5276
5277 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5278 if (!host)
5279 return NULL;
5280
5281 host->transportt = &ata_scsi_transport_template;
5282
5283 ap = ata_shost_to_port(host);
5284
5285 ata_host_init(ap, host, host_set, ent, port_no);
5286
5287 rc = ap->ops->port_start(ap);
5288 if (rc)
5289 goto err_out;
5290
5291 return ap;
5292
5293err_out:
5294 scsi_host_put(host);
5295 return NULL;
5296}
5297
5298/**
5299 * ata_device_add - Register hardware device with ATA and SCSI layers
5300 * @ent: Probe information describing hardware device to be registered
5301 *
5302 * This function processes the information provided in the probe
5303 * information struct @ent, allocates the necessary ATA and SCSI
5304 * host information structures, initializes them, and registers
5305 * everything with requisite kernel subsystems.
5306 *
5307 * This function requests irqs, probes the ATA bus, and probes
5308 * the SCSI bus.
5309 *
5310 * LOCKING:
5311 * PCI/etc. bus probe sem.
5312 *
5313 * RETURNS:
5314 * Number of ports registered. Zero on error (no ports registered).
5315 */
5316int ata_device_add(const struct ata_probe_ent *ent)
5317{
5318 unsigned int count = 0, i;
5319 struct device *dev = ent->dev;
5320 struct ata_host_set *host_set;
5321 int rc;
5322
5323 DPRINTK("ENTER\n");
5324 /* alloc a container for our list of ATA ports (buses) */
5325 host_set = kzalloc(sizeof(struct ata_host_set) +
5326 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5327 if (!host_set)
5328 return 0;
5329 spin_lock_init(&host_set->lock);
5330
5331 host_set->dev = dev;
5332 host_set->n_ports = ent->n_ports;
5333 host_set->irq = ent->irq;
5334 host_set->mmio_base = ent->mmio_base;
5335 host_set->private_data = ent->private_data;
5336 host_set->ops = ent->port_ops;
5337 host_set->flags = ent->host_set_flags;
5338
5339 /* register each port bound to this device */
5340 for (i = 0; i < ent->n_ports; i++) {
5341 struct ata_port *ap;
5342 unsigned long xfer_mode_mask;
5343
5344 ap = ata_host_add(ent, host_set, i);
5345 if (!ap)
5346 goto err_out;
5347
5348 host_set->ports[i] = ap;
5349 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5350 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5351 (ap->pio_mask << ATA_SHIFT_PIO);
5352
5353 /* print per-port info to dmesg */
5354 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5355 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
5356 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5357 ata_mode_string(xfer_mode_mask),
5358 ap->ioaddr.cmd_addr,
5359 ap->ioaddr.ctl_addr,
5360 ap->ioaddr.bmdma_addr,
5361 ent->irq);
5362
5363 ata_chk_status(ap);
5364 host_set->ops->irq_clear(ap);
5365 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5366 count++;
5367 }
5368
5369 if (!count)
5370 goto err_free_ret;
5371
5372 /* obtain irq, that is shared between channels */
5373 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5374 DRV_NAME, host_set);
5375 if (rc) {
5376 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5377 ent->irq, rc);
5378 goto err_out;
5379 }
5380
5381 /* perform each probe synchronously */
5382 DPRINTK("probe begin\n");
5383 for (i = 0; i < count; i++) {
5384 struct ata_port *ap;
5385 u32 scontrol;
5386 int rc;
5387
5388 ap = host_set->ports[i];
5389
5390 /* init sata_spd_limit to the current value */
5391 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5392 int spd = (scontrol >> 4) & 0xf;
5393 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5394 }
5395 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5396
5397 rc = scsi_add_host(ap->host, dev);
5398 if (rc) {
5399 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5400 /* FIXME: do something useful here */
5401 /* FIXME: handle unconditional calls to
5402 * scsi_scan_host and ata_host_remove, below,
5403 * at the very least
5404 */
5405 }
5406
5407 if (ap->ops->error_handler) {
5408 unsigned long flags;
5409
5410 ata_port_probe(ap);
5411
5412 /* kick EH for boot probing */
5413 spin_lock_irqsave(ap->lock, flags);
5414
5415 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5416 ap->eh_info.action |= ATA_EH_SOFTRESET;
5417
5418 ap->flags |= ATA_FLAG_LOADING;
5419 ata_port_schedule_eh(ap);
5420
5421 spin_unlock_irqrestore(ap->lock, flags);
5422
5423 /* wait for EH to finish */
5424 ata_port_wait_eh(ap);
5425 } else {
5426 DPRINTK("ata%u: bus probe begin\n", ap->id);
5427 rc = ata_bus_probe(ap);
5428 DPRINTK("ata%u: bus probe end\n", ap->id);
5429
5430 if (rc) {
5431 /* FIXME: do something useful here?
5432 * Current libata behavior will
5433 * tear down everything when
5434 * the module is removed
5435 * or the h/w is unplugged.
5436 */
5437 }
5438 }
5439 }
5440
5441 /* probes are done, now scan each port's disk(s) */
5442 DPRINTK("host probe begin\n");
5443 for (i = 0; i < count; i++) {
5444 struct ata_port *ap = host_set->ports[i];
5445
5446 ata_scsi_scan_host(ap);
5447 }
5448
5449 dev_set_drvdata(dev, host_set);
5450
5451 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5452 return ent->n_ports; /* success */
5453
5454err_out:
5455 for (i = 0; i < count; i++) {
5456 ata_host_remove(host_set->ports[i], 1);
5457 scsi_host_put(host_set->ports[i]->host);
5458 }
5459err_free_ret:
5460 kfree(host_set);
5461 VPRINTK("EXIT, returning 0\n");
5462 return 0;
5463}
5464
5465/**
5466 * ata_port_detach - Detach ATA port in preparation for device removal
5467 * @ap: ATA port to be detached
5468 *
5469 * Detach all ATA devices and the associated SCSI devices of @ap;
5470 * then, remove the associated SCSI host. @ap is guaranteed to
5471 * be quiescent on return from this function.
5472 *
5473 * LOCKING:
5474 * Kernel thread context (may sleep).
5475 */
5476void ata_port_detach(struct ata_port *ap)
5477{
5478 unsigned long flags;
5479 int i;
5480
5481 if (!ap->ops->error_handler)
5482 return;
5483
5484 /* tell EH we're leaving & flush EH */
5485 spin_lock_irqsave(ap->lock, flags);
5486 ap->flags |= ATA_FLAG_UNLOADING;
5487 spin_unlock_irqrestore(ap->lock, flags);
5488
5489 ata_port_wait_eh(ap);
5490
5491 /* EH is now guaranteed to see UNLOADING, so no new device
5492 * will be attached. Disable all existing devices.
5493 */
5494 spin_lock_irqsave(ap->lock, flags);
5495
5496 for (i = 0; i < ATA_MAX_DEVICES; i++)
5497 ata_dev_disable(&ap->device[i]);
5498
5499 spin_unlock_irqrestore(ap->lock, flags);
5500
5501 /* Final freeze & EH. All in-flight commands are aborted. EH
5502 * will be skipped and retries will be terminated with a bad
5503 * target.
5504 */
5505 spin_lock_irqsave(ap->lock, flags);
5506 ata_port_freeze(ap); /* won't be thawed */
5507 spin_unlock_irqrestore(ap->lock, flags);
5508
5509 ata_port_wait_eh(ap);
5510
5511 /* Flush hotplug task. The sequence is similar to
5512 * ata_port_flush_task().
5513 */
5514 flush_workqueue(ata_aux_wq);
5515 cancel_delayed_work(&ap->hotplug_task);
5516 flush_workqueue(ata_aux_wq);
5517
5518 /* remove the associated SCSI host */
5519 scsi_remove_host(ap->host);
5520}
5521
5522/**
5523 * ata_host_set_remove - PCI layer callback for device removal
5524 * @host_set: ATA host set that was removed
5525 *
5526 * Unregister all objects associated with this host set. Free those
5527 * objects.
5528 *
5529 * LOCKING:
5530 * Inherited from calling layer (may sleep).
5531 */
5532
5533void ata_host_set_remove(struct ata_host_set *host_set)
5534{
5535 unsigned int i;
5536
5537 for (i = 0; i < host_set->n_ports; i++)
5538 ata_port_detach(host_set->ports[i]);
5539
5540 free_irq(host_set->irq, host_set);
5541
5542 for (i = 0; i < host_set->n_ports; i++) {
5543 struct ata_port *ap = host_set->ports[i];
5544
5545 ata_scsi_release(ap->host);
5546
5547 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5548 struct ata_ioports *ioaddr = &ap->ioaddr;
5549
5550 if (ioaddr->cmd_addr == 0x1f0)
5551 release_region(0x1f0, 8);
5552 else if (ioaddr->cmd_addr == 0x170)
5553 release_region(0x170, 8);
5554 }
5555
5556 scsi_host_put(ap->host);
5557 }
5558
5559 if (host_set->ops->host_stop)
5560 host_set->ops->host_stop(host_set);
5561
5562 kfree(host_set);
5563}
5564
5565/**
5566 * ata_scsi_release - SCSI layer callback hook for host unload
5567 * @host: libata host to be unloaded
5568 *
5569 * Performs all duties necessary to shut down a libata port...
5570 * Kill port kthread, disable port, and release resources.
5571 *
5572 * LOCKING:
5573 * Inherited from SCSI layer.
5574 *
5575 * RETURNS:
5576 * One.
5577 */
5578
5579int ata_scsi_release(struct Scsi_Host *host)
5580{
5581 struct ata_port *ap = ata_shost_to_port(host);
5582
5583 DPRINTK("ENTER\n");
5584
5585 ap->ops->port_disable(ap);
5586 ata_host_remove(ap, 0);
5587
5588 DPRINTK("EXIT\n");
5589 return 1;
5590}
5591
5592/**
5593 * ata_std_ports - initialize ioaddr with standard port offsets.
5594 * @ioaddr: IO address structure to be initialized
5595 *
5596 * Utility function which initializes data_addr, error_addr,
5597 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5598 * device_addr, status_addr, and command_addr to standard offsets
5599 * relative to cmd_addr.
5600 *
5601 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5602 */
5603
5604void ata_std_ports(struct ata_ioports *ioaddr)
5605{
5606 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5607 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5608 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5609 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5610 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5611 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5612 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5613 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5614 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5615 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5616}
5617
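/* Illustrative sketch: initializing a legacy primary-channel ioaddr
 * with ata_std_ports(). Only cmd_addr needs to be set beforehand;
 * ctl_addr is assigned separately because ata_std_ports() leaves it
 * untouched. The helper name is hypothetical.
 */
static void example_init_legacy_ioaddr(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = 0x1f0;	/* legacy primary command block */
	ioaddr->ctl_addr = 0x3f6;	/* legacy primary control/altstatus */
	ata_std_ports(ioaddr);
}
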
5618
5619#ifdef CONFIG_PCI
5620
5621void ata_pci_host_stop (struct ata_host_set *host_set)
5622{
5623 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5624
5625 pci_iounmap(pdev, host_set->mmio_base);
5626}
5627
5628/**
5629 * ata_pci_remove_one - PCI layer callback for device removal
5630 * @pdev: PCI device that was removed
5631 *
5632 * PCI layer indicates to libata via this hook that a
5633 * hot-unplug or module unload event has occurred.
5634 * Handle this by unregistering all objects associated
5635 * with this PCI device. Free those objects. Then finally
5636 * release PCI resources and disable device.
5637 *
5638 * LOCKING:
5639 * Inherited from PCI layer (may sleep).
5640 */
5641
5642void ata_pci_remove_one (struct pci_dev *pdev)
5643{
5644 struct device *dev = pci_dev_to_dev(pdev);
5645 struct ata_host_set *host_set = dev_get_drvdata(dev);
5646 struct ata_host_set *host_set2 = host_set->next;
5647
5648 ata_host_set_remove(host_set);
5649 if (host_set2)
5650 ata_host_set_remove(host_set2);
5651
5652 pci_release_regions(pdev);
5653 pci_disable_device(pdev);
5654 dev_set_drvdata(dev, NULL);
5655}
5656
5657/* move to PCI subsystem */
5658int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5659{
5660 unsigned long tmp = 0;
5661
5662 switch (bits->width) {
5663 case 1: {
5664 u8 tmp8 = 0;
5665 pci_read_config_byte(pdev, bits->reg, &tmp8);
5666 tmp = tmp8;
5667 break;
5668 }
5669 case 2: {
5670 u16 tmp16 = 0;
5671 pci_read_config_word(pdev, bits->reg, &tmp16);
5672 tmp = tmp16;
5673 break;
5674 }
5675 case 4: {
5676 u32 tmp32 = 0;
5677 pci_read_config_dword(pdev, bits->reg, &tmp32);
5678 tmp = tmp32;
5679 break;
5680 }
5681
5682 default:
5683 return -EINVAL;
5684 }
5685
5686 tmp &= bits->mask;
5687
5688 return (tmp == bits->val) ? 1 : 0;
5689}
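
/* Editor's note: illustrative sketch, not part of the original file.
 * This mirrors the ata_piix-style port-enable test; the PIIX config
 * offsets (0x41/0x43, bit 7) are shown only as an example and the
 * helper names are hypothetical.
 */
#if 0
static const struct pci_bits my_port_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0: byte reg 0x41, bit 7 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1: byte reg 0x43, bit 7 */
};

static int my_port_enabled(struct pci_dev *pdev, unsigned int port_no)
{
	/* returns 1 if enabled, 0 if disabled, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &my_port_enable_bits[port_no]);
}
#endif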
5690
5691int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5692{
5693 pci_save_state(pdev);
5694 pci_disable_device(pdev);
5695 pci_set_power_state(pdev, PCI_D3hot);
5696 return 0;
5697}
5698
5699int ata_pci_device_resume(struct pci_dev *pdev)
5700{
5701 pci_set_power_state(pdev, PCI_D0);
5702 pci_restore_state(pdev);
5703 pci_enable_device(pdev);
5704 pci_set_master(pdev);
5705 return 0;
5706}
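
/* Editor's note: illustrative sketch, not part of the original file.
 * Drivers typically wire the two helpers above directly into their
 * pci_driver; everything here except the ata_pci_* callbacks is a
 * hypothetical name.
 */
#if 0
static struct pci_driver my_ata_pci_driver = {
	.name		= "my_ata",
	.id_table	= my_ata_pci_tbl,
	.probe		= my_ata_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif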
5707#endif /* CONFIG_PCI */
5708
5709
5710static int __init ata_init(void)
5711{
5712 ata_wq = create_workqueue("ata");
5713 if (!ata_wq)
5714 return -ENOMEM;
5715
5716 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5717 if (!ata_aux_wq) {
5718 destroy_workqueue(ata_wq);
5719 return -ENOMEM;
5720 }
5721
5722 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5723 return 0;
5724}
5725
5726static void __exit ata_exit(void)
5727{
5728 destroy_workqueue(ata_wq);
5729 destroy_workqueue(ata_aux_wq);
5730}
5731
5732module_init(ata_init);
5733module_exit(ata_exit);
5734
5735static unsigned long ratelimit_time;
5736static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5737
5738int ata_ratelimit(void)
5739{
5740 int rc;
5741 unsigned long flags;
5742
5743 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5744
5745 if (time_after(jiffies, ratelimit_time)) {
5746 rc = 1;
5747 ratelimit_time = jiffies + (HZ/5);
5748 } else
5749 rc = 0;
5750
5751 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5752
5753 return rc;
5754}
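
/* Editor's note: illustrative sketch, not part of the original file.
 * ata_ratelimit() allows roughly five messages per second, which is
 * handy for warnings emitted from interrupt paths; the status test
 * below is made up.
 */
#if 0
	if (unlikely(host_stat & ATA_DMA_ERR) && ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"DMA error, host_stat 0x%x\n", host_stat);
#endif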
5755
5756/**
5757 * ata_wait_register - wait until register value changes
5758 * @reg: IO-mapped register
5759 * @mask: Mask to apply to read register value
5760 * @val: Wait condition
5761 * @interval_msec: polling interval in milliseconds
5762 * @timeout_msec: timeout in milliseconds
5763 *
5764 * Waiting for some bits of a register to change is a common
5765 * operation for ATA controllers. This function reads the 32-bit
5766 * LE IO-mapped register @reg and tests for the following condition:
5767 *
5768 * (*@reg & mask) != val
5769 *
5770 * If the condition is met, it returns; otherwise, the process is
5771 * repeated after @interval_msec until timeout.
5772 *
5773 * LOCKING:
5774 * Kernel thread context (may sleep)
5775 *
5776 * RETURNS:
5777 * The final register value.
5778 */
5779u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5780 unsigned long interval_msec,
5781 unsigned long timeout_msec)
5782{
5783 unsigned long timeout;
5784 u32 tmp;
5785
5786 tmp = ioread32(reg);
5787
5788 /* Calculate timeout _after_ the first read to make sure
5789 * preceding writes reach the controller before starting to
5790 * eat away the timeout.
5791 */
5792 timeout = jiffies + (timeout_msec * HZ) / 1000;
5793
5794 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5795 msleep(interval_msec);
5796 tmp = ioread32(reg);
5797 }
5798
5799 return tmp;
5800}
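
/* Editor's note: illustrative sketch, not part of the original file.
 * To wait for a self-clearing reset bit, pass mask == val so the
 * poll loop runs while the bit is still set.  The register offset
 * and function name are hypothetical.
 */
#if 0
static int my_wait_reset_done(void __iomem *mmio_base)
{
	u32 tmp;

	/* poll bit 0 at offset 0x04 every 10ms, give up after 1s */
	tmp = ata_wait_register(mmio_base + 0x04, 0x1, 0x1, 10, 1000);
	if (tmp & 0x1)
		return -EBUSY;	/* timed out, controller still in reset */
	return 0;
}
#endif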
5801
5802/*
5803 * libata is essentially a library of internal helper functions for
5804 * low-level ATA host controller drivers. As such, the API/ABI is
5805 * likely to change as new drivers are added and updated.
5806 * Do not depend on ABI/API stability.
5807 */
5808
5809EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5810EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5811EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
5812EXPORT_SYMBOL_GPL(ata_std_bios_param);
5813EXPORT_SYMBOL_GPL(ata_std_ports);
5814EXPORT_SYMBOL_GPL(ata_device_add);
5815EXPORT_SYMBOL_GPL(ata_port_detach);
5816EXPORT_SYMBOL_GPL(ata_host_set_remove);
5817EXPORT_SYMBOL_GPL(ata_sg_init);
5818EXPORT_SYMBOL_GPL(ata_sg_init_one);
5819EXPORT_SYMBOL_GPL(ata_hsm_move);
5820EXPORT_SYMBOL_GPL(ata_qc_complete);
5821EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5822EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5823EXPORT_SYMBOL_GPL(ata_tf_load);
5824EXPORT_SYMBOL_GPL(ata_tf_read);
5825EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5826EXPORT_SYMBOL_GPL(ata_std_dev_select);
5827EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5828EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5829EXPORT_SYMBOL_GPL(ata_check_status);
5830EXPORT_SYMBOL_GPL(ata_altstatus);
5831EXPORT_SYMBOL_GPL(ata_exec_command);
5832EXPORT_SYMBOL_GPL(ata_port_start);
5833EXPORT_SYMBOL_GPL(ata_port_stop);
5834EXPORT_SYMBOL_GPL(ata_host_stop);
5835EXPORT_SYMBOL_GPL(ata_interrupt);
5836EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5837EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5838EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
5839EXPORT_SYMBOL_GPL(ata_qc_prep);
5840EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5841EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5842EXPORT_SYMBOL_GPL(ata_bmdma_start);
5843EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5844EXPORT_SYMBOL_GPL(ata_bmdma_status);
5845EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5846EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5847EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5848EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5849EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5850EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5851EXPORT_SYMBOL_GPL(ata_port_probe);
5852EXPORT_SYMBOL_GPL(sata_set_spd);
5853EXPORT_SYMBOL_GPL(sata_phy_debounce);
5854EXPORT_SYMBOL_GPL(sata_phy_resume);
5855EXPORT_SYMBOL_GPL(sata_phy_reset);
5856EXPORT_SYMBOL_GPL(__sata_phy_reset);
5857EXPORT_SYMBOL_GPL(ata_bus_reset);
5858EXPORT_SYMBOL_GPL(ata_std_prereset);
5859EXPORT_SYMBOL_GPL(ata_std_softreset);
5860EXPORT_SYMBOL_GPL(sata_std_hardreset);
5861EXPORT_SYMBOL_GPL(ata_std_postreset);
5862EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5863EXPORT_SYMBOL_GPL(ata_dev_classify);
5864EXPORT_SYMBOL_GPL(ata_dev_pair);
5865EXPORT_SYMBOL_GPL(ata_port_disable);
5866EXPORT_SYMBOL_GPL(ata_ratelimit);
5867EXPORT_SYMBOL_GPL(ata_wait_register);
5868EXPORT_SYMBOL_GPL(ata_busy_sleep);
5869EXPORT_SYMBOL_GPL(ata_port_queue_task);
5870EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5871EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5872EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5873EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5874EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5875EXPORT_SYMBOL_GPL(ata_scsi_release);
5876EXPORT_SYMBOL_GPL(ata_host_intr);
5877EXPORT_SYMBOL_GPL(sata_scr_valid);
5878EXPORT_SYMBOL_GPL(sata_scr_read);
5879EXPORT_SYMBOL_GPL(sata_scr_write);
5880EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5881EXPORT_SYMBOL_GPL(ata_port_online);
5882EXPORT_SYMBOL_GPL(ata_port_offline);
5883EXPORT_SYMBOL_GPL(ata_id_string);
5884EXPORT_SYMBOL_GPL(ata_id_c_string);
5885EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5886
5887EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5888EXPORT_SYMBOL_GPL(ata_timing_compute);
5889EXPORT_SYMBOL_GPL(ata_timing_merge);
5890
5891#ifdef CONFIG_PCI
5892EXPORT_SYMBOL_GPL(pci_test_config_bits);
5893EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5894EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5895EXPORT_SYMBOL_GPL(ata_pci_init_one);
5896EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5897EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5898EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5899EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5900EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5901#endif /* CONFIG_PCI */
5902
5903EXPORT_SYMBOL_GPL(ata_device_suspend);
5904EXPORT_SYMBOL_GPL(ata_device_resume);
5905EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5906EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5907
5908EXPORT_SYMBOL_GPL(ata_eng_timeout);
5909EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5910EXPORT_SYMBOL_GPL(ata_port_abort);
5911EXPORT_SYMBOL_GPL(ata_port_freeze);
5912EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5913EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5914EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5915EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5916EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
deleted file mode 100644
index 823385981a7a..000000000000
--- a/drivers/scsi/libata-eh.c
+++ /dev/null
@@ -1,1907 +0,0 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_eh.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_cmnd.h>
42#include "scsi_transport_api.h"
43
44#include <linux/libata.h>
45
46#include "libata.h"
47
48static void __ata_port_freeze(struct ata_port *ap);
49static void ata_eh_finish(struct ata_port *ap);
50
51static void ata_ering_record(struct ata_ering *ering, int is_io,
52 unsigned int err_mask)
53{
54 struct ata_ering_entry *ent;
55
56 WARN_ON(!err_mask);
57
58 ering->cursor++;
59 ering->cursor %= ATA_ERING_SIZE;
60
61 ent = &ering->ring[ering->cursor];
62 ent->is_io = is_io;
63 ent->err_mask = err_mask;
64 ent->timestamp = get_jiffies_64();
65}
66
67static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
68{
69 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
70 if (!ent->err_mask)
71 return NULL;
72 return ent;
73}
74
75static int ata_ering_map(struct ata_ering *ering,
76 int (*map_fn)(struct ata_ering_entry *, void *),
77 void *arg)
78{
79 int idx, rc = 0;
80 struct ata_ering_entry *ent;
81
82 idx = ering->cursor;
83 do {
84 ent = &ering->ring[idx];
85 if (!ent->err_mask)
86 break;
87 rc = map_fn(ent, arg);
88 if (rc)
89 break;
90 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
91 } while (idx != ering->cursor);
92
93 return rc;
94}
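
/* Editor's note: illustrative sketch, not part of the original file.
 * ata_ering_map() walks entries newest-first and stops early on a
 * non-zero return; speed_down_needed_cb() below is the in-tree user.
 * This hypothetical callback just counts timeouts:
 *
 *	int n = 0;
 *	ata_ering_map(&dev->ering, my_count_timeouts, &n);
 */
#if 0
static int my_count_timeouts(struct ata_ering_entry *ent, void *arg)
{
	if (ent->err_mask & AC_ERR_TIMEOUT)
		(*(int *)arg)++;
	return 0;	/* keep iterating */
}
#endif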
95
96/**
97 * ata_scsi_timed_out - SCSI layer time out callback
98 * @cmd: timed out SCSI command
99 *
100 * Handles SCSI layer timeout. We race with normal completion of
101 * the qc for @cmd. If the qc is already gone, we lose and let
102 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
103 * timed out and EH should be invoked. Prevent ata_qc_complete()
104 * from finishing it by setting EH_SCHEDULED and return
105 * EH_NOT_HANDLED.
106 *
107 * TODO: kill this function once old EH is gone.
108 *
109 * LOCKING:
110 * Called from timer context
111 *
112 * RETURNS:
113 * EH_HANDLED or EH_NOT_HANDLED
114 */
115enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
116{
117 struct Scsi_Host *host = cmd->device->host;
118 struct ata_port *ap = ata_shost_to_port(host);
119 unsigned long flags;
120 struct ata_queued_cmd *qc;
121 enum scsi_eh_timer_return ret;
122
123 DPRINTK("ENTER\n");
124
125 if (ap->ops->error_handler) {
126 ret = EH_NOT_HANDLED;
127 goto out;
128 }
129
130 ret = EH_HANDLED;
131 spin_lock_irqsave(ap->lock, flags);
132 qc = ata_qc_from_tag(ap, ap->active_tag);
133 if (qc) {
134 WARN_ON(qc->scsicmd != cmd);
135 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
136 qc->err_mask |= AC_ERR_TIMEOUT;
137 ret = EH_NOT_HANDLED;
138 }
139 spin_unlock_irqrestore(ap->lock, flags);
140
141 out:
142 DPRINTK("EXIT, ret=%d\n", ret);
143 return ret;
144}
145
146/**
147 * ata_scsi_error - SCSI layer error handler callback
148 * @host: SCSI host on which error occurred
149 *
150 * Handles SCSI-layer-thrown error events.
151 *
152 * LOCKING:
153 * Inherited from SCSI layer (none, can sleep)
154 *
155 * RETURNS:
156 * Zero.
157 */
158void ata_scsi_error(struct Scsi_Host *host)
159{
160 struct ata_port *ap = ata_shost_to_port(host);
161 spinlock_t *ap_lock = ap->lock;
162 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
163 unsigned long flags;
164
165 DPRINTK("ENTER\n");
166
167 /* synchronize with port task */
168 ata_port_flush_task(ap);
169
170 /* synchronize with host_set lock and sort out timeouts */
171
172 /* For new EH, all qcs are finished in one of three ways -
173 * normal completion, error completion, and SCSI timeout.
174 * Both completions can race against SCSI timeout. When normal
175 * completion wins, the qc never reaches EH. When error
176 * completion wins, the qc has ATA_QCFLAG_FAILED set.
177 *
178 * When SCSI timeout wins, things are a bit more complex.
179 * Normal or error completion can occur after the timeout but
180 * before this point. In such cases, both types of
181 * completions are honored. A scmd is determined to have
182 * timed out iff its associated qc is active and not failed.
183 */
184 if (ap->ops->error_handler) {
185 struct scsi_cmnd *scmd, *tmp;
186 int nr_timedout = 0;
187
188 spin_lock_irqsave(ap_lock, flags);
189
190 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
191 struct ata_queued_cmd *qc;
192
193 for (i = 0; i < ATA_MAX_QUEUE; i++) {
194 qc = __ata_qc_from_tag(ap, i);
195 if (qc->flags & ATA_QCFLAG_ACTIVE &&
196 qc->scsicmd == scmd)
197 break;
198 }
199
200 if (i < ATA_MAX_QUEUE) {
201 /* the scmd has an associated qc */
202 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
203 /* which hasn't failed yet, timeout */
204 qc->err_mask |= AC_ERR_TIMEOUT;
205 qc->flags |= ATA_QCFLAG_FAILED;
206 nr_timedout++;
207 }
208 } else {
209 /* Normal completion occurred after
210 * SCSI timeout but before this point.
211 * Successfully complete it.
212 */
213 scmd->retries = scmd->allowed;
214 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
215 }
216 }
217
218 /* If we have timed out qcs, they belong to EH from
219 * this point on, but the state of the controller is
220 * unknown. Freeze the port to make sure the IRQ
221 * handler doesn't diddle with those qcs. This must
222 * be done atomically w.r.t. setting QCFLAG_FAILED.
223 */
224 if (nr_timedout)
225 __ata_port_freeze(ap);
226
227 spin_unlock_irqrestore(ap_lock, flags);
228 } else
229 spin_unlock_wait(ap_lock);
230
231 repeat:
232 /* invoke error handler */
233 if (ap->ops->error_handler) {
234 /* fetch & clear EH info */
235 spin_lock_irqsave(ap_lock, flags);
236
237 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
238 ap->eh_context.i = ap->eh_info;
239 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
240
241 ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
242 ap->flags &= ~ATA_FLAG_EH_PENDING;
243
244 spin_unlock_irqrestore(ap_lock, flags);
245
246 /* invoke EH. if unloading, just finish failed qcs */
247 if (!(ap->flags & ATA_FLAG_UNLOADING))
248 ap->ops->error_handler(ap);
249 else
250 ata_eh_finish(ap);
251
252 /* An exception might have happened after ->error_handler
253 * recovered the port but before this point. Repeat
254 * EH in such case.
255 */
256 spin_lock_irqsave(ap_lock, flags);
257
258 if (ap->flags & ATA_FLAG_EH_PENDING) {
259 if (--repeat_cnt) {
260 ata_port_printk(ap, KERN_INFO,
261 "EH pending after completion, "
262 "repeating EH (cnt=%d)\n", repeat_cnt);
263 spin_unlock_irqrestore(ap_lock, flags);
264 goto repeat;
265 }
266 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
267 "tries, giving up\n", ATA_EH_MAX_REPEAT);
268 }
269
270 /* this run is complete, make sure EH info is clear */
271 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
272
273 /* Clear host_eh_scheduled while holding ap_lock such
274 * that if exception occurs after this point but
275 * before EH completion, SCSI midlayer will
276 * re-initiate EH.
277 */
278 host->host_eh_scheduled = 0;
279
280 spin_unlock_irqrestore(ap_lock, flags);
281 } else {
282 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
283 ap->ops->eng_timeout(ap);
284 }
285
286 /* finish or retry handled scmd's and clean up */
287 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
288
289 scsi_eh_flush_done_q(&ap->eh_done_q);
290
291 /* clean up */
292 spin_lock_irqsave(ap_lock, flags);
293
294 if (ap->flags & ATA_FLAG_LOADING) {
295 ap->flags &= ~ATA_FLAG_LOADING;
296 } else {
297 if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
298 queue_work(ata_aux_wq, &ap->hotplug_task);
299 if (ap->flags & ATA_FLAG_RECOVERED)
300 ata_port_printk(ap, KERN_INFO, "EH complete\n");
301 }
302
303 ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
304
305 /* tell wait_eh that we're done */
306 ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
307 wake_up_all(&ap->eh_wait_q);
308
309 spin_unlock_irqrestore(ap_lock, flags);
310
311 DPRINTK("EXIT\n");
312}
313
314/**
315 * ata_port_wait_eh - Wait for the currently pending EH to complete
316 * @ap: Port to wait EH for
317 *
318 * Wait until the currently pending EH is complete.
319 *
320 * LOCKING:
321 * Kernel thread context (may sleep).
322 */
323void ata_port_wait_eh(struct ata_port *ap)
324{
325 unsigned long flags;
326 DEFINE_WAIT(wait);
327
328 retry:
329 spin_lock_irqsave(ap->lock, flags);
330
331 while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
332 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
333 spin_unlock_irqrestore(ap->lock, flags);
334 schedule();
335 spin_lock_irqsave(ap->lock, flags);
336 }
337 finish_wait(&ap->eh_wait_q, &wait);
338
339 spin_unlock_irqrestore(ap->lock, flags);
340
341 /* make sure SCSI EH is complete */
342 if (scsi_host_in_recovery(ap->host)) {
343 msleep(10);
344 goto retry;
345 }
346}
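
/* Editor's note: illustrative sketch, not part of the original file.
 * The usual pattern (compare ata_port_detach() in libata-core.c):
 * schedule EH under ap->lock, drop the lock, then sleep until the
 * pending EH pass completes.
 */
#if 0
	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);	/* may sleep */
#endif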
347
348/**
349 * ata_qc_timeout - Handle timeout of queued command
350 * @qc: Command that timed out
351 *
352 * Some part of the kernel (currently, only the SCSI layer)
353 * has noticed that the active command on port @ap has not
354 * completed after a specified length of time. Handle this
355 * condition by disabling DMA (if necessary) and completing
356 * transactions, with error if necessary.
357 *
358 * This also handles the case of the "lost interrupt", where
359 * for some reason (possibly hardware bug, possibly driver bug)
360 * an interrupt was not delivered to the driver, even though the
361 * transaction completed successfully.
362 *
363 * TODO: kill this function once old EH is gone.
364 *
365 * LOCKING:
366 * Inherited from SCSI layer (none, can sleep)
367 */
368static void ata_qc_timeout(struct ata_queued_cmd *qc)
369{
370 struct ata_port *ap = qc->ap;
371 u8 host_stat = 0, drv_stat;
372 unsigned long flags;
373
374 DPRINTK("ENTER\n");
375
376 ap->hsm_task_state = HSM_ST_IDLE;
377
378 spin_lock_irqsave(ap->lock, flags);
379
380 switch (qc->tf.protocol) {
381
382 case ATA_PROT_DMA:
383 case ATA_PROT_ATAPI_DMA:
384 host_stat = ap->ops->bmdma_status(ap);
385
386 /* before we do anything else, clear DMA-Start bit */
387 ap->ops->bmdma_stop(qc);
388
389 /* fall through */
390
391 default:
392 ata_altstatus(ap);
393 drv_stat = ata_chk_status(ap);
394
395 /* ack bmdma irq events */
396 ap->ops->irq_clear(ap);
397
398 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
399 "stat 0x%x host_stat 0x%x\n",
400 qc->tf.command, drv_stat, host_stat);
401
402 /* complete taskfile transaction */
403 qc->err_mask |= AC_ERR_TIMEOUT;
404 break;
405 }
406
407 spin_unlock_irqrestore(ap->lock, flags);
408
409 ata_eh_qc_complete(qc);
410
411 DPRINTK("EXIT\n");
412}
413
414/**
415 * ata_eng_timeout - Handle timeout of queued command
416 * @ap: Port on which timed-out command is active
417 *
418 * Some part of the kernel (currently, only the SCSI layer)
419 * has noticed that the active command on port @ap has not
420 * completed after a specified length of time. Handle this
421 * condition by disabling DMA (if necessary) and completing
422 * transactions, with error if necessary.
423 *
424 * This also handles the case of the "lost interrupt", where
425 * for some reason (possibly hardware bug, possibly driver bug)
426 * an interrupt was not delivered to the driver, even though the
427 * transaction completed successfully.
428 *
429 * TODO: kill this function once old EH is gone.
430 *
431 * LOCKING:
432 * Inherited from SCSI layer (none, can sleep)
433 */
434void ata_eng_timeout(struct ata_port *ap)
435{
436 DPRINTK("ENTER\n");
437
438 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
439
440 DPRINTK("EXIT\n");
441}
442
443/**
444 * ata_qc_schedule_eh - schedule qc for error handling
445 * @qc: command to schedule error handling for
446 *
447 * Schedule error handling for @qc. EH will kick in as soon as
448 * other commands are drained.
449 *
450 * LOCKING:
451 * spin_lock_irqsave(host_set lock)
452 */
453void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
454{
455 struct ata_port *ap = qc->ap;
456
457 WARN_ON(!ap->ops->error_handler);
458
459 qc->flags |= ATA_QCFLAG_FAILED;
460 qc->ap->flags |= ATA_FLAG_EH_PENDING;
461
462 /* The following will fail if the timeout has already expired.
463 * ata_scsi_error() takes care of such scmds on EH entry.
464 * Note that ATA_QCFLAG_FAILED is unconditionally set after
465 * this function completes.
466 */
467 scsi_req_abort_cmd(qc->scsicmd);
468}
469
470/**
471 * ata_port_schedule_eh - schedule error handling without a qc
472 * @ap: ATA port to schedule EH for
473 *
474 * Schedule error handling for @ap. EH will kick in as soon as
475 * all commands are drained.
476 *
477 * LOCKING:
478 * spin_lock_irqsave(host_set lock)
479 */
480void ata_port_schedule_eh(struct ata_port *ap)
481{
482 WARN_ON(!ap->ops->error_handler);
483
484 ap->flags |= ATA_FLAG_EH_PENDING;
485 scsi_schedule_eh(ap->host);
486
487 DPRINTK("port EH scheduled\n");
488}
489
490/**
491 * ata_port_abort - abort all qc's on the port
492 * @ap: ATA port to abort qc's for
493 *
494 * Abort all active qc's of @ap and schedule EH.
495 *
496 * LOCKING:
497 * spin_lock_irqsave(host_set lock)
498 *
499 * RETURNS:
500 * Number of aborted qc's.
501 */
502int ata_port_abort(struct ata_port *ap)
503{
504 int tag, nr_aborted = 0;
505
506 WARN_ON(!ap->ops->error_handler);
507
508 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
509 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
510
511 if (qc) {
512 qc->flags |= ATA_QCFLAG_FAILED;
513 ata_qc_complete(qc);
514 nr_aborted++;
515 }
516 }
517
518 if (!nr_aborted)
519 ata_port_schedule_eh(ap);
520
521 return nr_aborted;
522}
523
524/**
525 * __ata_port_freeze - freeze port
526 * @ap: ATA port to freeze
527 *
528 * This function is called when an HSM violation or some other
529 * condition disrupts normal operation of the port. A frozen port
530 * is not allowed to perform any operation until it is thawed,
531 * which usually follows a successful reset.
532 *
533 * ap->ops->freeze() callback can be used for freezing the port
534 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
535 * port cannot be frozen hardware-wise, the interrupt handler
536 * must ack and clear interrupts unconditionally while the port
537 * is frozen.
538 *
539 * LOCKING:
540 * spin_lock_irqsave(host_set lock)
541 */
542static void __ata_port_freeze(struct ata_port *ap)
543{
544 WARN_ON(!ap->ops->error_handler);
545
546 if (ap->ops->freeze)
547 ap->ops->freeze(ap);
548
549 ap->flags |= ATA_FLAG_FROZEN;
550
551 DPRINTK("ata%u port frozen\n", ap->id);
552}
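
/* Editor's note: illustrative sketch, not part of the original file.
 * A taskfile-style ->freeze() usually just masks the device
 * interrupt, as ata_bmdma_freeze() does; the port I/O-only variant
 * below is a hypothetical simplification.
 */
#if 0
static void my_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;		/* nIEN: disable device interrupts */
	ap->last_ctl = ap->ctl;
	outb(ap->ctl, ap->ioaddr.ctl_addr);
}
#endif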
553
554/**
555 * ata_port_freeze - abort & freeze port
556 * @ap: ATA port to freeze
557 *
558 * Abort and freeze @ap.
559 *
560 * LOCKING:
561 * spin_lock_irqsave(host_set lock)
562 *
563 * RETURNS:
564 * Number of aborted commands.
565 */
566int ata_port_freeze(struct ata_port *ap)
567{
568 int nr_aborted;
569
570 WARN_ON(!ap->ops->error_handler);
571
572 nr_aborted = ata_port_abort(ap);
573 __ata_port_freeze(ap);
574
575 return nr_aborted;
576}
577
578/**
579 * ata_eh_freeze_port - EH helper to freeze port
580 * @ap: ATA port to freeze
581 *
582 * Freeze @ap.
583 *
584 * LOCKING:
585 * None.
586 */
587void ata_eh_freeze_port(struct ata_port *ap)
588{
589 unsigned long flags;
590
591 if (!ap->ops->error_handler)
592 return;
593
594 spin_lock_irqsave(ap->lock, flags);
595 __ata_port_freeze(ap);
596 spin_unlock_irqrestore(ap->lock, flags);
597}
598
599/**
600 * ata_eh_thaw_port - EH helper to thaw port
601 * @ap: ATA port to thaw
602 *
603 * Thaw frozen port @ap.
604 *
605 * LOCKING:
606 * None.
607 */
608void ata_eh_thaw_port(struct ata_port *ap)
609{
610 unsigned long flags;
611
612 if (!ap->ops->error_handler)
613 return;
614
615 spin_lock_irqsave(ap->lock, flags);
616
617 ap->flags &= ~ATA_FLAG_FROZEN;
618
619 if (ap->ops->thaw)
620 ap->ops->thaw(ap);
621
622 spin_unlock_irqrestore(ap->lock, flags);
623
624 DPRINTK("ata%u port thawed\n", ap->id);
625}
626
627static void ata_eh_scsidone(struct scsi_cmnd *scmd)
628{
629 /* nada */
630}
631
632static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
633{
634 struct ata_port *ap = qc->ap;
635 struct scsi_cmnd *scmd = qc->scsicmd;
636 unsigned long flags;
637
638 spin_lock_irqsave(ap->lock, flags);
639 qc->scsidone = ata_eh_scsidone;
640 __ata_qc_complete(qc);
641 WARN_ON(ata_tag_valid(qc->tag));
642 spin_unlock_irqrestore(ap->lock, flags);
643
644 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
645}
646
647/**
648 * ata_eh_qc_complete - Complete an active ATA command from EH
649 * @qc: Command to complete
650 *
651 * Indicate to the mid and upper layers that an ATA command has
652 * completed. To be used from EH.
653 */
654void ata_eh_qc_complete(struct ata_queued_cmd *qc)
655{
656 struct scsi_cmnd *scmd = qc->scsicmd;
657 scmd->retries = scmd->allowed;
658 __ata_eh_qc_complete(qc);
659}
660
661/**
662 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
663 * @qc: Command to retry
664 *
665 * Indicate to the mid and upper layers that an ATA command
666 * should be retried. To be used from EH.
667 *
668 * SCSI midlayer limits the number of retries to scmd->allowed.
669 * scmd->retries is decremented for commands which get retried
670 * due to unrelated failures (qc->err_mask is zero).
671 */
672void ata_eh_qc_retry(struct ata_queued_cmd *qc)
673{
674 struct scsi_cmnd *scmd = qc->scsicmd;
675 if (!qc->err_mask && scmd->retries)
676 scmd->retries--;
677 __ata_eh_qc_complete(qc);
678}
679
680/**
681 * ata_eh_detach_dev - detach ATA device
682 * @dev: ATA device to detach
683 *
684 * Detach @dev.
685 *
686 * LOCKING:
687 * None.
688 */
689static void ata_eh_detach_dev(struct ata_device *dev)
690{
691 struct ata_port *ap = dev->ap;
692 unsigned long flags;
693
694 ata_dev_disable(dev);
695
696 spin_lock_irqsave(ap->lock, flags);
697
698 dev->flags &= ~ATA_DFLAG_DETACH;
699
700 if (ata_scsi_offline_dev(dev)) {
701 dev->flags |= ATA_DFLAG_DETACHED;
702 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
703 }
704
705 spin_unlock_irqrestore(ap->lock, flags);
706}
707
708static void ata_eh_clear_action(struct ata_device *dev,
709 struct ata_eh_info *ehi, unsigned int action)
710{
711 int i;
712
713 if (!dev) {
714 ehi->action &= ~action;
715 for (i = 0; i < ATA_MAX_DEVICES; i++)
716 ehi->dev_action[i] &= ~action;
717 } else {
718 /* doesn't make sense for port-wide EH actions */
719 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
720
721 /* break ehi->action into ehi->dev_action */
722 if (ehi->action & action) {
723 for (i = 0; i < ATA_MAX_DEVICES; i++)
724 ehi->dev_action[i] |= ehi->action & action;
725 ehi->action &= ~action;
726 }
727
728 /* turn off the specified per-dev action */
729 ehi->dev_action[dev->devno] &= ~action;
730 }
731}
732
733/**
734 * ata_eh_about_to_do - about to perform eh_action
735 * @ap: target ATA port
736 * @dev: target ATA dev for per-dev action (can be NULL)
737 * @action: action about to be performed
738 *
739 * Called just before performing EH actions to clear related bits
740 * in @ap->eh_info such that eh actions are not unnecessarily
741 * repeated.
742 *
743 * LOCKING:
744 * None.
745 */
746static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
747 unsigned int action)
748{
749 unsigned long flags;
750
751 spin_lock_irqsave(ap->lock, flags);
752 ata_eh_clear_action(dev, &ap->eh_info, action);
753 ap->flags |= ATA_FLAG_RECOVERED;
754 spin_unlock_irqrestore(ap->lock, flags);
755}
756
757/**
758 * ata_eh_done - EH action complete
759 * @ap: target ATA port
760 * @dev: target ATA dev for per-dev action (can be NULL)
761 * @action: action just completed
762 *
763 * Called right after performing EH actions to clear related bits
764 * in @ap->eh_context.
765 *
766 * LOCKING:
767 * None.
768 */
769static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
770 unsigned int action)
771{
772 ata_eh_clear_action(dev, &ap->eh_context.i, action);
773}
774
775/**
776 * ata_err_string - convert err_mask to descriptive string
777 * @err_mask: error mask to convert to string
778 *
779 * Convert @err_mask to descriptive string. Errors are
780 * prioritized according to severity and only the most severe
781 * error is reported.
782 *
783 * LOCKING:
784 * None.
785 *
786 * RETURNS:
787 * Descriptive string for @err_mask
788 */
789static const char * ata_err_string(unsigned int err_mask)
790{
791 if (err_mask & AC_ERR_HOST_BUS)
792 return "host bus error";
793 if (err_mask & AC_ERR_ATA_BUS)
794 return "ATA bus error";
795 if (err_mask & AC_ERR_TIMEOUT)
796 return "timeout";
797 if (err_mask & AC_ERR_HSM)
798 return "HSM violation";
799 if (err_mask & AC_ERR_SYSTEM)
800 return "internal error";
801 if (err_mask & AC_ERR_MEDIA)
802 return "media error";
803 if (err_mask & AC_ERR_INVALID)
804 return "invalid argument";
805 if (err_mask & AC_ERR_DEV)
806 return "device error";
807 return "unknown error";
808}
809
810/**
811 * ata_read_log_page - read a specific log page
812 * @dev: target device
813 * @page: page to read
814 * @buf: buffer to store read page
815 * @sectors: number of sectors to read
816 *
817 * Read log page using READ_LOG_EXT command.
818 *
819 * LOCKING:
820 * Kernel thread context (may sleep).
821 *
822 * RETURNS:
823 * 0 on success, AC_ERR_* mask otherwise.
824 */
825static unsigned int ata_read_log_page(struct ata_device *dev,
826 u8 page, void *buf, unsigned int sectors)
827{
828 struct ata_taskfile tf;
829 unsigned int err_mask;
830
831 DPRINTK("read log page - page %d\n", page);
832
833 ata_tf_init(dev, &tf);
834 tf.command = ATA_CMD_READ_LOG_EXT;
835 tf.lbal = page;
836 tf.nsect = sectors;
837 tf.hob_nsect = sectors >> 8;
838 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
839 tf.protocol = ATA_PROT_PIO;
840
841 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
842 buf, sectors * ATA_SECT_SIZE);
843
844 DPRINTK("EXIT, err_mask=%x\n", err_mask);
845 return err_mask;
846}
847
848/**
849 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
850 * @dev: Device to read log page 10h from
851 * @tag: Resulting tag of the failed command
852 * @tf: Resulting taskfile registers of the failed command
853 *
854 * Read log page 10h to obtain NCQ error details and clear error
855 * condition.
856 *
857 * LOCKING:
858 * Kernel thread context (may sleep).
859 *
860 * RETURNS:
861 * 0 on success, -errno otherwise.
862 */
863static int ata_eh_read_log_10h(struct ata_device *dev,
864 int *tag, struct ata_taskfile *tf)
865{
866 u8 *buf = dev->ap->sector_buf;
867 unsigned int err_mask;
868 u8 csum;
869 int i;
870
871 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
872 if (err_mask)
873 return -EIO;
874
875 csum = 0;
876 for (i = 0; i < ATA_SECT_SIZE; i++)
877 csum += buf[i];
878 if (csum)
879 ata_dev_printk(dev, KERN_WARNING,
880 "invalid checksum 0x%x on log page 10h\n", csum);
881
882 if (buf[0] & 0x80)
883 return -ENOENT;
884
885 *tag = buf[0] & 0x1f;
886
887 tf->command = buf[2];
888 tf->feature = buf[3];
889 tf->lbal = buf[4];
890 tf->lbam = buf[5];
891 tf->lbah = buf[6];
892 tf->device = buf[7];
893 tf->hob_lbal = buf[8];
894 tf->hob_lbam = buf[9];
895 tf->hob_lbah = buf[10];
896 tf->nsect = buf[12];
897 tf->hob_nsect = buf[13];
898
899 return 0;
900}
901
902/**
903 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
904 * @dev: device to perform REQUEST_SENSE to
905 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
906 *
907 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
908 * SENSE. This function is an EH helper.
909 *
910 * LOCKING:
911 * Kernel thread context (may sleep).
912 *
913 * RETURNS:
914 * 0 on success, AC_ERR_* mask on failure
915 */
916static unsigned int atapi_eh_request_sense(struct ata_device *dev,
917 unsigned char *sense_buf)
918{
919 struct ata_port *ap = dev->ap;
920 struct ata_taskfile tf;
921 u8 cdb[ATAPI_CDB_LEN];
922
923 DPRINTK("ATAPI request sense\n");
924
925 ata_tf_init(dev, &tf);
926
927 /* FIXME: is this needed? */
928 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
929
930 /* XXX: why tf_read here? */
931 ap->ops->tf_read(ap, &tf);
932
933 /* fill these in, for the case where they are -not- overwritten */
934 sense_buf[0] = 0x70;
935 sense_buf[2] = tf.feature >> 4;
936
937 memset(cdb, 0, ATAPI_CDB_LEN);
938 cdb[0] = REQUEST_SENSE;
939 cdb[4] = SCSI_SENSE_BUFFERSIZE;
940
941 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
942 tf.command = ATA_CMD_PACKET;
943
944 /* is it pointless to prefer PIO for "safety reasons"? */
945 if (ap->flags & ATA_FLAG_PIO_DMA) {
946 tf.protocol = ATA_PROT_ATAPI_DMA;
947 tf.feature |= ATAPI_PKT_DMA;
948 } else {
949 tf.protocol = ATA_PROT_ATAPI;
950 tf.lbam = (8 * 1024) & 0xff;
951 tf.lbah = (8 * 1024) >> 8;
952 }
953
954 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
955 sense_buf, SCSI_SENSE_BUFFERSIZE);
956}
957
958/**
959 * ata_eh_analyze_serror - analyze SError for a failed port
960 * @ap: ATA port to analyze SError for
961 *
962 * Analyze SError if available and further determine cause of
963 * failure.
964 *
965 * LOCKING:
966 * None.
967 */
968static void ata_eh_analyze_serror(struct ata_port *ap)
969{
970 struct ata_eh_context *ehc = &ap->eh_context;
971 u32 serror = ehc->i.serror;
972 unsigned int err_mask = 0, action = 0;
973
974 if (serror & SERR_PERSISTENT) {
975 err_mask |= AC_ERR_ATA_BUS;
976 action |= ATA_EH_HARDRESET;
977 }
978 if (serror &
979 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
980 err_mask |= AC_ERR_ATA_BUS;
981 action |= ATA_EH_SOFTRESET;
982 }
983 if (serror & SERR_PROTOCOL) {
984 err_mask |= AC_ERR_HSM;
985 action |= ATA_EH_SOFTRESET;
986 }
987 if (serror & SERR_INTERNAL) {
988 err_mask |= AC_ERR_SYSTEM;
989 action |= ATA_EH_SOFTRESET;
990 }
991 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
992 ata_ehi_hotplugged(&ehc->i);
993
994 ehc->i.err_mask |= err_mask;
995 ehc->i.action |= action;
996}
997
998/**
999 * ata_eh_analyze_ncq_error - analyze NCQ error
1000 * @ap: ATA port to analyze NCQ error for
1001 *
1002 * Read log page 10h, determine the offending qc and acquire
1003 * error status TF. For NCQ device errors, all an LLDD has to
1004 * do is set AC_ERR_DEV in ehi->err_mask. This function takes
1005 * care of the rest.
1006 *
1007 * LOCKING:
1008 * Kernel thread context (may sleep).
1009 */
1010static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1011{
1012 struct ata_eh_context *ehc = &ap->eh_context;
1013 struct ata_device *dev = ap->device;
1014 struct ata_queued_cmd *qc;
1015 struct ata_taskfile tf;
1016 int tag, rc;
1017
1018 /* if frozen, we can't do much */
1019 if (ap->flags & ATA_FLAG_FROZEN)
1020 return;
1021
1022 /* is it NCQ device error? */
1023 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1024 return;
1025
1026 /* has LLDD analyzed already? */
1027 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1028 qc = __ata_qc_from_tag(ap, tag);
1029
1030 if (!(qc->flags & ATA_QCFLAG_FAILED))
1031 continue;
1032
1033 if (qc->err_mask)
1034 return;
1035 }
1036
1037 /* okay, this error is ours */
1038 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1039 if (rc) {
1040 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1041 "(errno=%d)\n", rc);
1042 return;
1043 }
1044
1045 if (!(ap->sactive & (1 << tag))) {
1046 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1047 "inactive tag %d\n", tag);
1048 return;
1049 }
1050
1051 /* we've got the perpetrator, condemn it */
1052 qc = __ata_qc_from_tag(ap, tag);
1053 memcpy(&qc->result_tf, &tf, sizeof(tf));
1054 qc->err_mask |= AC_ERR_DEV;
1055 ehc->i.err_mask &= ~AC_ERR_DEV;
1056}
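
/* Editor's note: illustrative sketch, not part of the original file.
 * From an LLDD's interrupt handler (host_set lock held), reporting
 * an NCQ device error really is this small; MY_IRQ_DEV_ERR is a
 * made-up status bit.
 */
#if 0
	if (irq_stat & MY_IRQ_DEV_ERR) {
		ap->eh_info.err_mask |= AC_ERR_DEV;
		ata_port_abort(ap);	/* EH reads log page 10h and picks the qc */
	}
#endif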
1057
1058/**
1059 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1060 * @qc: qc to analyze
1061 * @tf: Taskfile registers to analyze
1062 *
1063 * Analyze taskfile of @qc and further determine cause of
1064 * failure. This function also requests ATAPI sense data if
1065 * available.
1066 *
1067 * LOCKING:
1068 * Kernel thread context (may sleep).
1069 *
1070 * RETURNS:
1071 * Determined recovery action
1072 */
1073static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1074 const struct ata_taskfile *tf)
1075{
1076 unsigned int tmp, action = 0;
1077 u8 stat = tf->command, err = tf->feature;
1078
1079 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1080 qc->err_mask |= AC_ERR_HSM;
1081 return ATA_EH_SOFTRESET;
1082 }
1083
1084 if (!(qc->err_mask & AC_ERR_DEV))
1085 return 0;
1086
1087 switch (qc->dev->class) {
1088 case ATA_DEV_ATA:
1089 if (err & ATA_ICRC)
1090 qc->err_mask |= AC_ERR_ATA_BUS;
1091 if (err & ATA_UNC)
1092 qc->err_mask |= AC_ERR_MEDIA;
1093 if (err & ATA_IDNF)
1094 qc->err_mask |= AC_ERR_INVALID;
1095 break;
1096
1097 case ATA_DEV_ATAPI:
1098 tmp = atapi_eh_request_sense(qc->dev,
1099 qc->scsicmd->sense_buffer);
1100 if (!tmp) {
1101 /* ATA_QCFLAG_SENSE_VALID is used to tell
1102 * atapi_qc_complete() that sense data is
1103 * already valid.
1104 *
1105 * TODO: interpret sense data and set
1106 * appropriate err_mask.
1107 */
1108 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1109 } else
1110 qc->err_mask |= tmp;
1111 }
1112
1113 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1114 action |= ATA_EH_SOFTRESET;
1115
1116 return action;
1117}
1118
1119static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1120{
1121 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1122 return 1;
1123
1124 if (ent->is_io) {
1125 if (ent->err_mask & AC_ERR_HSM)
1126 return 1;
1127 if ((ent->err_mask &
1128 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1129 return 2;
1130 }
1131
1132 return 0;
1133}
1134
1135struct speed_down_needed_arg {
1136 u64 since;
1137 int nr_errors[3];
1138};
1139
1140static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1141{
1142 struct speed_down_needed_arg *arg = void_arg;
1143
1144 if (ent->timestamp < arg->since)
1145 return -1;
1146
1147 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1148 return 0;
1149}
1150
1151/**
1152 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1153 * @dev: Device of interest
1154 *
1155 * This function examines error ring of @dev and determines
1156 * whether speed down is necessary. Speed down is necessary if
1157 * there have been more than 3 Cat-1 errors or more than 10 Cat-2
1158 * errors during the last 15 minutes.
1159 *
1160 * Cat-1 errors are ATA_BUS or TIMEOUT for any command, and HSM
1161 * violations for known supported commands.
1162 *
1163 * Cat-2 errors are unclassified DEV errors for known supported
1164 * commands.
1165 *
1166 * LOCKING:
1167 * Inherited from caller.
1168 *
1169 * RETURNS:
1170 * 1 if speed down is necessary, 0 otherwise
1171 */
1172static int ata_eh_speed_down_needed(struct ata_device *dev)
1173{
1174 const u64 interval = 15LLU * 60 * HZ;
1175 static const int err_limits[3] = { -1, 3, 10 };
1176 struct speed_down_needed_arg arg;
1177 struct ata_ering_entry *ent;
1178 int err_cat;
1179 u64 j64;
1180
1181 ent = ata_ering_top(&dev->ering);
1182 if (!ent)
1183 return 0;
1184
1185 err_cat = ata_eh_categorize_ering_entry(ent);
1186 if (err_cat == 0)
1187 return 0;
1188
1189 memset(&arg, 0, sizeof(arg));
1190
1191 j64 = get_jiffies_64();
1192 if (j64 >= interval)
1193 arg.since = j64 - interval;
1194 else
1195 arg.since = 0;
1196
1197 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1198
1199 return arg.nr_errors[err_cat] > err_limits[err_cat];
1200}
1201
1202/**
1203 * ata_eh_speed_down - record error and speed down if necessary
1204 * @dev: Failed device
1205 * @is_io: Did the device fail during normal IO?
1206 * @err_mask: err_mask of the error
1207 *
1208 * Record error and examine error history to determine whether
1209 * adjusting transmission speed is necessary. It also sets
1210 * transmission limits appropriately if such adjustment is
1211 * necessary.
1212 *
1213 * LOCKING:
1214 * Kernel thread context (may sleep).
1215 *
1216 * RETURNS:
1217 * Determined recovery action (ATA_EH_* mask) or 0 if no action is needed
1218 */
1219static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1220 unsigned int err_mask)
1221{
1222 if (!err_mask)
1223 return 0;
1224
1225 /* record error and determine whether speed down is necessary */
1226 ata_ering_record(&dev->ering, is_io, err_mask);
1227
1228 if (!ata_eh_speed_down_needed(dev))
1229 return 0;
1230
1231 /* speed down SATA link speed if possible */
1232 if (sata_down_spd_limit(dev->ap) == 0)
1233 return ATA_EH_HARDRESET;
1234
1235 /* lower transfer mode */
1236 if (ata_down_xfermask_limit(dev, 0) == 0)
1237 return ATA_EH_SOFTRESET;
1238
1239 ata_dev_printk(dev, KERN_ERR,
1240 "speed down requested but no transfer mode left\n");
1241 return 0;
1242}
1243
1244/**
1245 * ata_eh_autopsy - analyze error and determine recovery action
1246 * @ap: ATA port to perform autopsy on
1247 *
1248 * Analyze why @ap failed and determine which recovery action is
1249 * needed. This function also sets more detailed AC_ERR_* values
1250 * and fills sense data for ATAPI CHECK SENSE.
1251 *
1252 * LOCKING:
1253 * Kernel thread context (may sleep).
1254 */
1255static void ata_eh_autopsy(struct ata_port *ap)
1256{
1257 struct ata_eh_context *ehc = &ap->eh_context;
1258 unsigned int action = ehc->i.action;
1259 struct ata_device *failed_dev = NULL;
1260 unsigned int all_err_mask = 0;
1261 int tag, is_io = 0;
1262 u32 serror;
1263 int rc;
1264
1265 DPRINTK("ENTER\n");
1266
1267 /* obtain and analyze SError */
1268 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1269 if (rc == 0) {
1270 ehc->i.serror |= serror;
1271 ata_eh_analyze_serror(ap);
1272 } else if (rc != -EOPNOTSUPP)
1273 action |= ATA_EH_HARDRESET;
1274
1275 /* analyze NCQ failure */
1276 ata_eh_analyze_ncq_error(ap);
1277
1278 /* any real error trumps AC_ERR_OTHER */
1279 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1280 ehc->i.err_mask &= ~AC_ERR_OTHER;
1281
1282 all_err_mask |= ehc->i.err_mask;
1283
1284 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1285 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1286
1287 if (!(qc->flags & ATA_QCFLAG_FAILED))
1288 continue;
1289
1290 /* inherit upper level err_mask */
1291 qc->err_mask |= ehc->i.err_mask;
1292
1293 /* analyze TF */
1294 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1295
1296 /* DEV errors are probably spurious in case of ATA_BUS error */
1297 if (qc->err_mask & AC_ERR_ATA_BUS)
1298 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1299 AC_ERR_INVALID);
1300
1301 /* any real error trumps unknown error */
1302 if (qc->err_mask & ~AC_ERR_OTHER)
1303 qc->err_mask &= ~AC_ERR_OTHER;
1304
1305 /* SENSE_VALID trumps dev/unknown error and revalidation */
1306 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1307 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1308 action &= ~ATA_EH_REVALIDATE;
1309 }
1310
1311 /* accumulate error info */
1312 failed_dev = qc->dev;
1313 all_err_mask |= qc->err_mask;
1314 if (qc->flags & ATA_QCFLAG_IO)
1315 is_io = 1;
1316 }
1317
1318 /* enforce default EH actions */
1319 if (ap->flags & ATA_FLAG_FROZEN ||
1320 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1321 action |= ATA_EH_SOFTRESET;
1322 else if (all_err_mask)
1323 action |= ATA_EH_REVALIDATE;
1324
1325 /* if we have offending qcs and the associated failed device */
1326 if (failed_dev) {
1327 /* speed down */
1328 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1329
1330 /* perform per-dev EH action only on the offending device */
1331 ehc->i.dev_action[failed_dev->devno] |=
1332 action & ATA_EH_PERDEV_MASK;
1333 action &= ~ATA_EH_PERDEV_MASK;
1334 }
1335
1336 /* record autopsy result */
1337 ehc->i.dev = failed_dev;
1338 ehc->i.action = action;
1339
1340 DPRINTK("EXIT\n");
1341}
1342
1343/**
1344 * ata_eh_report - report error handling to user
1345 * @ap: ATA port EH is going on
1346 *
1347 * Report EH to user.
1348 *
1349 * LOCKING:
1350 * None.
1351 */
1352static void ata_eh_report(struct ata_port *ap)
1353{
1354 struct ata_eh_context *ehc = &ap->eh_context;
1355 const char *frozen, *desc;
1356 int tag, nr_failed = 0;
1357
1358 desc = NULL;
1359 if (ehc->i.desc[0] != '\0')
1360 desc = ehc->i.desc;
1361
1362 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1363 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1364
1365 if (!(qc->flags & ATA_QCFLAG_FAILED))
1366 continue;
1367 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1368 continue;
1369
1370 nr_failed++;
1371 }
1372
1373 if (!nr_failed && !ehc->i.err_mask)
1374 return;
1375
1376 frozen = "";
1377 if (ap->flags & ATA_FLAG_FROZEN)
1378 frozen = " frozen";
1379
1380 if (ehc->i.dev) {
1381 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1382 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1383 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1384 ehc->i.action, frozen);
1385 if (desc)
1386 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1387 } else {
1388 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1389 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1390 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1391 ehc->i.action, frozen);
1392 if (desc)
1393 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1394 }
1395
1396 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1397 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1398
1399 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1400 continue;
1401
1402 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1403 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1404 qc->tag, qc->tf.command, qc->err_mask,
1405 qc->result_tf.command, qc->result_tf.feature,
1406 ata_err_string(qc->err_mask));
1407 }
1408}
1409
1410static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1411 unsigned int *classes)
1412{
1413 int i, rc;
1414
1415 for (i = 0; i < ATA_MAX_DEVICES; i++)
1416 classes[i] = ATA_DEV_UNKNOWN;
1417
1418 rc = reset(ap, classes);
1419 if (rc)
1420 return rc;
1421
1422 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
1423 * complete and convert all remaining ATA_DEV_UNKNOWN to
1424 * ATA_DEV_NONE.
1425 */
1426 for (i = 0; i < ATA_MAX_DEVICES; i++)
1427 if (classes[i] != ATA_DEV_UNKNOWN)
1428 break;
1429
1430 if (i < ATA_MAX_DEVICES)
1431 for (i = 0; i < ATA_MAX_DEVICES; i++)
1432 if (classes[i] == ATA_DEV_UNKNOWN)
1433 classes[i] = ATA_DEV_NONE;
1434
1435 return 0;
1436}
1437
1438static int ata_eh_followup_srst_needed(int rc, int classify,
1439 const unsigned int *classes)
1440{
1441 if (rc == -EAGAIN)
1442 return 1;
1443 if (rc != 0)
1444 return 0;
1445 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1446 return 1;
1447 return 0;
1448}
1449
1450static int ata_eh_reset(struct ata_port *ap, int classify,
1451 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1452 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1453{
1454 struct ata_eh_context *ehc = &ap->eh_context;
1455 unsigned int *classes = ehc->classes;
1456 int tries = ATA_EH_RESET_TRIES;
1457 int verbose = !(ap->flags & ATA_FLAG_LOADING);
1458 unsigned int action;
1459 ata_reset_fn_t reset;
1460 int i, did_followup_srst, rc;
1461
1462 /* Determine which reset to use and record in ehc->i.action.
1463 * prereset() may examine and modify it.
1464 */
1465 action = ehc->i.action;
1466 ehc->i.action &= ~ATA_EH_RESET_MASK;
1467 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1468 !(action & ATA_EH_HARDRESET))))
1469 ehc->i.action |= ATA_EH_SOFTRESET;
1470 else
1471 ehc->i.action |= ATA_EH_HARDRESET;
1472
1473 if (prereset) {
1474 rc = prereset(ap);
1475 if (rc) {
1476 ata_port_printk(ap, KERN_ERR,
1477 "prereset failed (errno=%d)\n", rc);
1478 return rc;
1479 }
1480 }
1481
1482 /* prereset() might have modified ehc->i.action */
1483 if (ehc->i.action & ATA_EH_HARDRESET)
1484 reset = hardreset;
1485 else if (ehc->i.action & ATA_EH_SOFTRESET)
1486 reset = softreset;
1487 else {
1488 /* prereset told us not to reset, bang classes and return */
1489 for (i = 0; i < ATA_MAX_DEVICES; i++)
1490 classes[i] = ATA_DEV_NONE;
1491 return 0;
1492 }
1493
1494 /* did prereset() screw up? if so, fix up to avoid oopsing */
1495 if (!reset) {
1496 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1497 "invalid reset type\n");
1498 if (softreset)
1499 reset = softreset;
1500 else
1501 reset = hardreset;
1502 }
1503
1504 retry:
1505 /* shut up during boot probing */
1506 if (verbose)
1507 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1508 reset == softreset ? "soft" : "hard");
1509
1510 /* reset */
1511 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1512 ehc->i.flags |= ATA_EHI_DID_RESET;
1513
1514 rc = ata_do_reset(ap, reset, classes);
1515
1516 did_followup_srst = 0;
1517 if (reset == hardreset &&
1518 ata_eh_followup_srst_needed(rc, classify, classes)) {
1519 /* okay, let's do follow-up softreset */
1520 did_followup_srst = 1;
1521 reset = softreset;
1522
1523 if (!reset) {
1524 ata_port_printk(ap, KERN_ERR,
1525 "follow-up softreset required "
1526 "but no softreset avaliable\n");
1527 return -EINVAL;
1528 }
1529
1530 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1531 rc = ata_do_reset(ap, reset, classes);
1532
1533 if (rc == 0 && classify &&
1534 classes[0] == ATA_DEV_UNKNOWN) {
1535 ata_port_printk(ap, KERN_ERR,
1536 "classification failed\n");
1537 return -EINVAL;
1538 }
1539 }
1540
1541 if (rc && --tries) {
1542 const char *type;
1543
1544 if (reset == softreset) {
1545 if (did_followup_srst)
1546 type = "follow-up soft";
1547 else
1548 type = "soft";
1549 } else
1550 type = "hard";
1551
1552 ata_port_printk(ap, KERN_WARNING,
1553 "%sreset failed, retrying in 5 secs\n", type);
1554 ssleep(5);
1555
1556 if (reset == hardreset)
1557 sata_down_spd_limit(ap);
1558 if (hardreset)
1559 reset = hardreset;
1560 goto retry;
1561 }
1562
1563 if (rc == 0) {
1564 /* After the reset, the device state is PIO 0 and the
1565 * controller state is undefined. Record the mode.
1566 */
1567 for (i = 0; i < ATA_MAX_DEVICES; i++)
1568 ap->device[i].pio_mode = XFER_PIO_0;
1569
1570 if (postreset)
1571 postreset(ap, classes);
1572
1573 /* reset successful, schedule revalidation */
1574 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
1575 ehc->i.action |= ATA_EH_REVALIDATE;
1576 }
1577
1578 return rc;
1579}
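
/* Editor's note: illustrative sketch, not part of the original file.
 * A prereset() may steer ata_eh_reset() by editing ehc->i.action
 * before it is re-read, or clear ATA_EH_RESET_MASK to skip the reset
 * entirely.  The policy below is made up.
 */
#if 0
static int my_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;

	/* e.g. force hardreset whenever a link speed change is pending */
	if (sata_set_spd_needed(ap))
		ehc->i.action |= ATA_EH_HARDRESET;

	return 0;	/* non-zero would make EH give up on the reset */
}
#endif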
1580
1581static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1582 struct ata_device **r_failed_dev)
1583{
1584 struct ata_eh_context *ehc = &ap->eh_context;
1585 struct ata_device *dev;
1586 unsigned long flags;
1587 int i, rc = 0;
1588
1589 DPRINTK("ENTER\n");
1590
1591 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1592 unsigned int action;
1593
1594 dev = &ap->device[i];
1595 action = ehc->i.action | ehc->i.dev_action[dev->devno];
1596
1597 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
1598 if (ata_port_offline(ap)) {
1599 rc = -EIO;
1600 break;
1601 }
1602
1603 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1604 rc = ata_dev_revalidate(dev,
1605 ehc->i.flags & ATA_EHI_DID_RESET);
1606 if (rc)
1607 break;
1608
1609 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1610
1611 /* schedule the scsi_rescan_device() here */
1612 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1613 } else if (dev->class == ATA_DEV_UNKNOWN &&
1614 ehc->tries[dev->devno] &&
1615 ata_class_enabled(ehc->classes[dev->devno])) {
1616 dev->class = ehc->classes[dev->devno];
1617
1618 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1619 if (rc == 0)
1620 rc = ata_dev_configure(dev, 1);
1621
1622 if (rc) {
1623 dev->class = ATA_DEV_UNKNOWN;
1624 break;
1625 }
1626
1627 spin_lock_irqsave(ap->lock, flags);
1628 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
1629 spin_unlock_irqrestore(ap->lock, flags);
1630 }
1631 }
1632
1633 if (rc)
1634 *r_failed_dev = dev;
1635
1636 DPRINTK("EXIT\n");
1637 return rc;
1638}
1639
1640static int ata_port_nr_enabled(struct ata_port *ap)
1641{
1642 int i, cnt = 0;
1643
1644 for (i = 0; i < ATA_MAX_DEVICES; i++)
1645 if (ata_dev_enabled(&ap->device[i]))
1646 cnt++;
1647 return cnt;
1648}
1649
1650static int ata_port_nr_vacant(struct ata_port *ap)
1651{
1652 int i, cnt = 0;
1653
1654 for (i = 0; i < ATA_MAX_DEVICES; i++)
1655 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1656 cnt++;
1657 return cnt;
1658}
1659
1660static int ata_eh_skip_recovery(struct ata_port *ap)
1661{
1662 struct ata_eh_context *ehc = &ap->eh_context;
1663 int i;
1664
1665 if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
1666 return 0;
1667
1668 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1669 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1670 struct ata_device *dev = &ap->device[i];
1671
1672 if (dev->class == ATA_DEV_UNKNOWN &&
1673 ehc->classes[dev->devno] != ATA_DEV_NONE)
1674 return 0;
1675 }
1676
1677 return 1;
1678}
1679
1680/**
1681 * ata_eh_recover - recover host port after error
1682 * @ap: host port to recover
1683 * @prereset: prereset method (can be NULL)
1684 * @softreset: softreset method (can be NULL)
1685 * @hardreset: hardreset method (can be NULL)
1686 * @postreset: postreset method (can be NULL)
1687 *
1688 * This is the alpha and omega, yin and yang, heart and soul of
1689 * libata exception handling. On entry, actions required to
1690 * recover the port and hotplug requests are recorded in
1691 * eh_context. This function executes all the operations with
1692 * appropriate retries and fallbacks to resurrect failed
1693 * devices, detach goners and greet newcomers.
1694 *
1695 * LOCKING:
1696 * Kernel thread context (may sleep).
1697 *
1698 * RETURNS:
1699 * 0 on success, -errno on failure.
1700 */
1701static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1702 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1703 ata_postreset_fn_t postreset)
1704{
1705 struct ata_eh_context *ehc = &ap->eh_context;
1706 struct ata_device *dev;
1707 int down_xfermask, i, rc;
1708
1709 DPRINTK("ENTER\n");
1710
1711 /* prep for recovery */
1712 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1713 dev = &ap->device[i];
1714
1715 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1716
1717 /* process hotplug request */
1718 if (dev->flags & ATA_DFLAG_DETACH)
1719 ata_eh_detach_dev(dev);
1720
1721 if (!ata_dev_enabled(dev) &&
1722 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1723 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1724 ata_eh_detach_dev(dev);
1725 ata_dev_init(dev);
1726 ehc->did_probe_mask |= (1 << dev->devno);
1727 ehc->i.action |= ATA_EH_SOFTRESET;
1728 }
1729 }
1730
1731 retry:
1732 down_xfermask = 0;
1733 rc = 0;
1734
1735 /* if UNLOADING, finish immediately */
1736 if (ap->flags & ATA_FLAG_UNLOADING)
1737 goto out;
1738
1739 /* skip EH if possible. */
1740 if (ata_eh_skip_recovery(ap))
1741 ehc->i.action = 0;
1742
1743 for (i = 0; i < ATA_MAX_DEVICES; i++)
1744 ehc->classes[i] = ATA_DEV_UNKNOWN;
1745
1746 /* reset */
1747 if (ehc->i.action & ATA_EH_RESET_MASK) {
1748 ata_eh_freeze_port(ap);
1749
1750 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1751 softreset, hardreset, postreset);
1752 if (rc) {
1753 ata_port_printk(ap, KERN_ERR,
1754 "reset failed, giving up\n");
1755 goto out;
1756 }
1757
1758 ata_eh_thaw_port(ap);
1759 }
1760
1761 /* revalidate existing devices and attach new ones */
1762 rc = ata_eh_revalidate_and_attach(ap, &dev);
1763 if (rc)
1764 goto dev_fail;
1765
1766 /* configure transfer mode if the port has been reset */
1767 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1768 rc = ata_set_mode(ap, &dev);
1769 if (rc) {
1770 down_xfermask = 1;
1771 goto dev_fail;
1772 }
1773 }
1774
1775 goto out;
1776
1777 dev_fail:
1778 switch (rc) {
1779 case -ENODEV:
1780 /* device missing, schedule probing */
1781 ehc->i.probe_mask |= (1 << dev->devno);
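		/* fall through -- -ENODEV also zeroes the retry count */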
1782 case -EINVAL:
1783 ehc->tries[dev->devno] = 0;
1784 break;
1785 case -EIO:
1786 sata_down_spd_limit(ap);
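		/* fall through -- -EIO also decrements the retry count */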
1787 default:
1788 ehc->tries[dev->devno]--;
1789 if (down_xfermask &&
1790 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1791 ehc->tries[dev->devno] = 0;
1792 }
1793
1794 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
1795 /* disable device if it has used up all its chances */
1796 ata_dev_disable(dev);
1797
1798 /* detach if offline */
1799 if (ata_port_offline(ap))
1800 ata_eh_detach_dev(dev);
1801
1802 /* probe if requested */
1803 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
1804 !(ehc->did_probe_mask & (1 << dev->devno))) {
1805 ata_eh_detach_dev(dev);
1806 ata_dev_init(dev);
1807
1808 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1809 ehc->did_probe_mask |= (1 << dev->devno);
1810 ehc->i.action |= ATA_EH_SOFTRESET;
1811 }
1812 } else {
1813 /* soft didn't work? be haaaaard */
1814 if (ehc->i.flags & ATA_EHI_DID_RESET)
1815 ehc->i.action |= ATA_EH_HARDRESET;
1816 else
1817 ehc->i.action |= ATA_EH_SOFTRESET;
1818 }
1819
1820 if (ata_port_nr_enabled(ap)) {
1821 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1822 "devices, retrying in 5 secs\n");
1823 ssleep(5);
1824 } else {
1825 /* no device left, repeat fast */
1826 msleep(500);
1827 }
1828
1829 goto retry;
1830
1831 out:
1832 if (rc) {
1833 for (i = 0; i < ATA_MAX_DEVICES; i++)
1834 ata_dev_disable(&ap->device[i]);
1835 }
1836
1837 DPRINTK("EXIT, rc=%d\n", rc);
1838 return rc;
1839}
1840
1841/**
1842 * ata_eh_finish - finish up EH
1843 * @ap: host port to finish EH for
1844 *
1845 * Recovery is complete. Clean up EH states and retry or finish
1846 * failed qcs.
1847 *
1848 * LOCKING:
1849 * None.
1850 */
1851static void ata_eh_finish(struct ata_port *ap)
1852{
1853 int tag;
1854
1855 /* retry or finish qcs */
1856 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1857 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1858
1859 if (!(qc->flags & ATA_QCFLAG_FAILED))
1860 continue;
1861
1862 if (qc->err_mask) {
1863 /* FIXME: Once EH migration is complete,
1864 * generate sense data in this function,
1865 * considering both err_mask and tf.
1866 */
1867 if (qc->err_mask & AC_ERR_INVALID)
1868 ata_eh_qc_complete(qc);
1869 else
1870 ata_eh_qc_retry(qc);
1871 } else {
1872 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1873 ata_eh_qc_complete(qc);
1874 } else {
1875 /* feed zero TF to sense generation */
1876 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1877 ata_eh_qc_retry(qc);
1878 }
1879 }
1880 }
1881}
1882
1883/**
1884 * ata_do_eh - do standard error handling
1885 * @ap: host port to handle error for
1886 * @prereset: prereset method (can be NULL)
1887 * @softreset: softreset method (can be NULL)
1888 * @hardreset: hardreset method (can be NULL)
1889 * @postreset: postreset method (can be NULL)
1890 *
1891 * Perform standard error handling sequence.
1892 *
1893 * LOCKING:
1894 * Kernel thread context (may sleep).
1895 */
1896void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1897 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1898 ata_postreset_fn_t postreset)
1899{
1900 if (!(ap->flags & ATA_FLAG_LOADING)) {
1901 ata_eh_autopsy(ap);
1902 ata_eh_report(ap);
1903 }
1904
1905 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
1906 ata_eh_finish(ap);
1907}
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
deleted file mode 100644
index 93d18a74c401..000000000000
--- a/drivers/scsi/libata-scsi.c
+++ /dev/null
@@ -1,3052 +0,0 @@
1/*
2 * libata-scsi.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from
31 * - http://www.t10.org/
32 * - http://www.t13.org/
33 *
34 */
35
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <asm/uaccess.h>
49
50#include "libata.h"
51
52#define SECTOR_SIZE 512
53
54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
55
56static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
57 const struct scsi_device *scsidev);
58static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
59 const struct scsi_device *scsidev);
60static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
61 unsigned int id, unsigned int lun);
62
63
64#define RW_RECOVERY_MPAGE 0x1
65#define RW_RECOVERY_MPAGE_LEN 12
66#define CACHE_MPAGE 0x8
67#define CACHE_MPAGE_LEN 20
68#define CONTROL_MPAGE 0xa
69#define CONTROL_MPAGE_LEN 12
70#define ALL_MPAGES 0x3f
71#define ALL_SUB_MPAGES 0xff
72
73
74static const u8 def_rw_recovery_mpage[] = {
75 RW_RECOVERY_MPAGE,
76 RW_RECOVERY_MPAGE_LEN - 2,
 77	(1 << 7) |	/* AWRE, sat-r06 says it shall be 0 */
78 (1 << 6), /* ARRE (auto read reallocation) */
79 0, /* read retry count */
80 0, 0, 0, 0,
81 0, /* write retry count */
82 0, 0, 0
83};
84
85static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
86 CACHE_MPAGE,
87 CACHE_MPAGE_LEN - 2,
88 0, /* contains WCE, needs to be 0 for logic */
89 0, 0, 0, 0, 0, 0, 0, 0, 0,
90 0, /* contains DRA, needs to be 0 for logic */
91 0, 0, 0, 0, 0, 0, 0
92};
93
94static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
95 CONTROL_MPAGE,
96 CONTROL_MPAGE_LEN - 2,
97 2, /* DSENSE=0, GLTSD=1 */
98 0, /* [QAM+QERR may be 1, see 05-359r1] */
99 0, 0, 0, 0, 0xff, 0xff,
100 0, 30 /* extended self test time, see 05-359r1 */
101};
102
103/*
104 * libata transport template. libata doesn't do real transport stuff.
105 * It just needs the eh_timed_out hook.
106 */
107struct scsi_transport_template ata_scsi_transport_template = {
108 .eh_strategy_handler = ata_scsi_error,
109 .eh_timed_out = ata_scsi_timed_out,
110 .user_scan = ata_scsi_user_scan,
111};
112
113
114static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
115 void (*done)(struct scsi_cmnd *))
116{
117 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
118 /* "Invalid field in cbd" */
119 done(cmd);
120}
121
122/**
123 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
124 * @sdev: SCSI device for which BIOS geometry is to be determined
125 * @bdev: block device associated with @sdev
126 * @capacity: capacity of SCSI device
127 * @geom: location to which geometry will be output
128 *
129 * Generic bios head/sector/cylinder calculator
 130 * used by sd. Most BIOSes nowadays expect a XXX/255/63 (CHS)
131 * mapping. Some situations may arise where the disk is not
132 * bootable if this is not used.
133 *
134 * LOCKING:
135 * Defined by the SCSI layer. We don't really care.
136 *
137 * RETURNS:
138 * Zero.
139 */
140int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
141 sector_t capacity, int geom[])
142{
143 geom[0] = 255;
144 geom[1] = 63;
145 sector_div(capacity, 255*63);
146 geom[2] = capacity;
147
148 return 0;
149}
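/*
 * Worked example (illustrative): a 40 GB disk has
 * 40e9 / 512 = 78125000 sectors, so the geometry reported to sd
 * becomes 78125000 / (255 * 63) = 4863 cylinders with 255 heads
 * and 63 sectors, i.e. 4863/255/63.
 */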
150
151/**
152 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
153 * @scsidev: Device to which we are issuing command
154 * @arg: User provided data for issuing command
155 *
156 * LOCKING:
157 * Defined by the SCSI layer. We don't really care.
158 *
159 * RETURNS:
160 * Zero on success, negative errno on error.
161 */
162
163int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
164{
165 int rc = 0;
166 u8 scsi_cmd[MAX_COMMAND_SIZE];
167 u8 args[4], *argbuf = NULL;
168 int argsize = 0;
169 struct scsi_sense_hdr sshdr;
170 enum dma_data_direction data_dir;
171
172 if (arg == NULL)
173 return -EINVAL;
174
175 if (copy_from_user(args, arg, sizeof(args)))
176 return -EFAULT;
177
178 memset(scsi_cmd, 0, sizeof(scsi_cmd));
179
180 if (args[3]) {
181 argsize = SECTOR_SIZE * args[3];
182 argbuf = kmalloc(argsize, GFP_KERNEL);
183 if (argbuf == NULL) {
184 rc = -ENOMEM;
185 goto error;
186 }
187
188 scsi_cmd[1] = (4 << 1); /* PIO Data-in */
189 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
190 block count in sector count field */
191 data_dir = DMA_FROM_DEVICE;
192 } else {
193 scsi_cmd[1] = (3 << 1); /* Non-data */
194 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
195 data_dir = DMA_NONE;
196 }
197
198 scsi_cmd[0] = ATA_16;
199
200 scsi_cmd[4] = args[2];
201 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
202 scsi_cmd[6] = args[3];
203 scsi_cmd[8] = args[1];
204 scsi_cmd[10] = 0x4f;
205 scsi_cmd[12] = 0xc2;
206 } else {
207 scsi_cmd[6] = args[1];
208 }
209 scsi_cmd[14] = args[0];
210
211 /* Good values for timeout and retries? Values below
212 from scsi_ioctl_send_command() for default case... */
213 if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
214 &sshdr, (10*HZ), 5)) {
215 rc = -EIO;
216 goto error;
217 }
218
219 /* Need code to retrieve data from check condition? */
220
221 if ((argbuf)
222 && copy_to_user(arg + sizeof(args), argbuf, argsize))
223 rc = -EFAULT;
224error:
 225	kfree(argbuf);	/* kfree(NULL) is a no-op */
227
228 return rc;
229}
230
231/**
232 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
233 * @scsidev: Device to which we are issuing command
234 * @arg: User provided data for issuing command
235 *
236 * LOCKING:
237 * Defined by the SCSI layer. We don't really care.
238 *
239 * RETURNS:
240 * Zero on success, negative errno on error.
241 */
242int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
243{
244 int rc = 0;
245 u8 scsi_cmd[MAX_COMMAND_SIZE];
246 u8 args[7];
247 struct scsi_sense_hdr sshdr;
248
249 if (arg == NULL)
250 return -EINVAL;
251
252 if (copy_from_user(args, arg, sizeof(args)))
253 return -EFAULT;
254
255 memset(scsi_cmd, 0, sizeof(scsi_cmd));
256 scsi_cmd[0] = ATA_16;
257 scsi_cmd[1] = (3 << 1); /* Non-data */
258 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
259 scsi_cmd[4] = args[1];
260 scsi_cmd[6] = args[2];
261 scsi_cmd[8] = args[3];
262 scsi_cmd[10] = args[4];
263 scsi_cmd[12] = args[5];
264 scsi_cmd[14] = args[0];
265
266 /* Good values for timeout and retries? Values below
267 from scsi_ioctl_send_command() for default case... */
268 if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
269 (10*HZ), 5))
270 rc = -EIO;
271
272 /* Need code to retrieve data from check condition? */
273 return rc;
274}
275
276int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
277{
278 int val = -EINVAL, rc = -EINVAL;
279
280 switch (cmd) {
281 case ATA_IOC_GET_IO32:
282 val = 0;
283 if (copy_to_user(arg, &val, 1))
284 return -EFAULT;
285 return 0;
286
287 case ATA_IOC_SET_IO32:
288 val = (unsigned long) arg;
289 if (val != 0)
290 return -EINVAL;
291 return 0;
292
293 case HDIO_DRIVE_CMD:
294 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
295 return -EACCES;
296 return ata_cmd_ioctl(scsidev, arg);
297
298 case HDIO_DRIVE_TASK:
299 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
300 return -EACCES;
301 return ata_task_ioctl(scsidev, arg);
302
303 default:
304 rc = -ENOTTY;
305 break;
306 }
307
308 return rc;
309}
310
311/**
312 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
313 * @dev: ATA device to which the new command is attached
314 * @cmd: SCSI command that originated this ATA command
315 * @done: SCSI command completion function
316 *
317 * Obtain a reference to an unused ata_queued_cmd structure,
318 * which is the basic libata structure representing a single
319 * ATA command sent to the hardware.
320 *
321 * If a command was available, fill in the SCSI-specific
322 * portions of the structure with information on the
323 * current command.
324 *
325 * LOCKING:
326 * spin_lock_irqsave(host_set lock)
327 *
328 * RETURNS:
329 * Command allocated, or %NULL if none available.
330 */
331struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
332 struct scsi_cmnd *cmd,
333 void (*done)(struct scsi_cmnd *))
334{
335 struct ata_queued_cmd *qc;
336
337 qc = ata_qc_new_init(dev);
338 if (qc) {
339 qc->scsicmd = cmd;
340 qc->scsidone = done;
341
342 if (cmd->use_sg) {
343 qc->__sg = (struct scatterlist *) cmd->request_buffer;
344 qc->n_elem = cmd->use_sg;
345 } else {
346 qc->__sg = &qc->sgent;
347 qc->n_elem = 1;
348 }
349 } else {
350 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
351 done(cmd);
352 }
353
354 return qc;
355}
356
357/**
358 * ata_dump_status - user friendly display of error info
359 * @id: id of the port in question
360 * @tf: ptr to filled out taskfile
361 *
362 * Decode and dump the ATA error/status registers for the user so
363 * that they have some idea what really happened at the non
364 * make-believe layer.
365 *
366 * LOCKING:
367 * inherited from caller
368 */
369void ata_dump_status(unsigned id, struct ata_taskfile *tf)
370{
371 u8 stat = tf->command, err = tf->feature;
372
373 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
374 if (stat & ATA_BUSY) {
375 printk("Busy }\n"); /* Data is not valid in this case */
376 } else {
377 if (stat & 0x40) printk("DriveReady ");
378 if (stat & 0x20) printk("DeviceFault ");
379 if (stat & 0x10) printk("SeekComplete ");
380 if (stat & 0x08) printk("DataRequest ");
381 if (stat & 0x04) printk("CorrectedError ");
382 if (stat & 0x02) printk("Index ");
383 if (stat & 0x01) printk("Error ");
384 printk("}\n");
385
386 if (err) {
387 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
388 if (err & 0x04) printk("DriveStatusError ");
389 if (err & 0x80) {
390 if (err & 0x04) printk("BadCRC ");
391 else printk("Sector ");
392 }
393 if (err & 0x40) printk("UncorrectableError ");
394 if (err & 0x10) printk("SectorIdNotFound ");
395 if (err & 0x02) printk("TrackZeroNotFound ");
396 if (err & 0x01) printk("AddrMarkNotFound ");
397 printk("}\n");
398 }
399 }
400}
401
402int ata_scsi_device_resume(struct scsi_device *sdev)
403{
404 struct ata_port *ap = ata_shost_to_port(sdev->host);
405 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
406
407 return ata_device_resume(dev);
408}
409
410int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
411{
412 struct ata_port *ap = ata_shost_to_port(sdev->host);
413 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
414
415 return ata_device_suspend(dev, state);
416}
417
418/**
419 * ata_to_sense_error - convert ATA error to SCSI error
420 * @id: ATA device number
421 * @drv_stat: value contained in ATA status register
422 * @drv_err: value contained in ATA error register
423 * @sk: the sense key we'll fill out
424 * @asc: the additional sense code we'll fill out
425 * @ascq: the additional sense code qualifier we'll fill out
426 * @verbose: be verbose
427 *
428 * Converts an ATA error into a SCSI error. Fill out pointers to
429 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
430 * format sense blocks.
431 *
432 * LOCKING:
433 * spin_lock_irqsave(host_set lock)
434 */
435void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
436 u8 *ascq, int verbose)
437{
438 int i;
439
440 /* Based on the 3ware driver translation table */
441 static const unsigned char sense_table[][4] = {
442 /* BBD|ECC|ID|MAR */
443 {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
444 /* BBD|ECC|ID */
445 {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
446 /* ECC|MC|MARK */
447 {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
448 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
449 {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
450 /* MC|ID|ABRT|TRK0|MARK */
451 {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
452 /* MCR|MARK */
453 {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
454 /* Bad address mark */
455 {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
456 /* TRK0 */
457 {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
458 /* Abort & !ICRC */
459 {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
460 /* Media change request */
461 {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
462 /* SRV */
463 {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
464 /* Media change */
465 {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
466 /* ECC */
467 {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
468 /* BBD - block marked bad */
469 {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
470 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
471 };
472 static const unsigned char stat_table[][4] = {
473 /* Must be first because BUSY means no other bits valid */
474 {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
475 {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
476 {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
477 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
478 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
479 };
480
481 /*
482 * Is this an error we can process/parse
483 */
484 if (drv_stat & ATA_BUSY) {
485 drv_err = 0; /* Ignore the err bits, they're invalid */
486 }
487
488 if (drv_err) {
489 /* Look for drv_err */
490 for (i = 0; sense_table[i][0] != 0xFF; i++) {
491 /* Look for best matches first */
492 if ((sense_table[i][0] & drv_err) ==
493 sense_table[i][0]) {
494 *sk = sense_table[i][1];
495 *asc = sense_table[i][2];
496 *ascq = sense_table[i][3];
497 goto translate_done;
498 }
499 }
500 /* No immediate match */
501 if (verbose)
502 printk(KERN_WARNING "ata%u: no sense translation for "
503 "error 0x%02x\n", id, drv_err);
504 }
505
506 /* Fall back to interpreting status bits */
507 for (i = 0; stat_table[i][0] != 0xFF; i++) {
508 if (stat_table[i][0] & drv_stat) {
509 *sk = stat_table[i][1];
510 *asc = stat_table[i][2];
511 *ascq = stat_table[i][3];
512 goto translate_done;
513 }
514 }
515 /* No error? Undecoded? */
516 if (verbose)
517 printk(KERN_WARNING "ata%u: no sense translation for "
518 "status: 0x%02x\n", id, drv_stat);
519
520 /* We need a sensible error return here, which is tricky, and one
521 that won't cause people to do things like return a disk wrongly */
522 *sk = ABORTED_COMMAND;
523 *asc = 0x00;
524 *ascq = 0x00;
525
526 translate_done:
527 if (verbose)
528 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
529 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
530 id, drv_stat, drv_err, *sk, *asc, *ascq);
531 return;
532}
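/*
 * Worked example (illustrative): a failed read returning status 0x51
 * (DRDY|DSC|ERR) with error 0x40 (UNC) is not busy, so the error
 * register is consulted and matches the 0x40 sense_table entry,
 * yielding SK=MEDIUM_ERROR, ASC=0x11, ASCQ=0x04
 * ("unrecovered read error").
 */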
533
 534/**
535 * ata_gen_ata_desc_sense - Generate check condition sense block.
536 * @qc: Command that completed.
537 *
538 * This function is specific to the ATA descriptor format sense
539 * block specified for the ATA pass through commands. Regardless
540 * of whether the command errored or not, return a sense
541 * block. Copy all controller registers into the sense
542 * block. Clear sense key, ASC & ASCQ if there is no error.
543 *
544 * LOCKING:
545 * spin_lock_irqsave(host_set lock)
546 */
547void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
548{
549 struct scsi_cmnd *cmd = qc->scsicmd;
550 struct ata_taskfile *tf = &qc->result_tf;
551 unsigned char *sb = cmd->sense_buffer;
552 unsigned char *desc = sb + 8;
553 int verbose = qc->ap->ops->error_handler == NULL;
554
555 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
556
557 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
558
559 /*
560 * Use ata_to_sense_error() to map status register bits
561 * onto sense key, asc & ascq.
562 */
563 if (qc->err_mask ||
564 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
565 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
566 &sb[1], &sb[2], &sb[3], verbose);
567 sb[1] &= 0x0f;
568 }
569
570 /*
571 * Sense data is current and format is descriptor.
572 */
573 sb[0] = 0x72;
574
575 desc[0] = 0x09;
576
577 /*
578 * Set length of additional sense data.
579 * Since we only populate descriptor 0, the total
580 * length is the same (fixed) length as descriptor 0.
581 */
582 desc[1] = sb[7] = 14;
583
584 /*
585 * Copy registers into sense buffer.
586 */
587 desc[2] = 0x00;
588 desc[3] = tf->feature; /* == error reg */
589 desc[5] = tf->nsect;
590 desc[7] = tf->lbal;
591 desc[9] = tf->lbam;
592 desc[11] = tf->lbah;
593 desc[12] = tf->device;
594 desc[13] = tf->command; /* == status reg */
595
596 /*
597 * Fill in Extend bit, and the high order bytes
598 * if applicable.
599 */
600 if (tf->flags & ATA_TFLAG_LBA48) {
601 desc[2] |= 0x01;
602 desc[4] = tf->hob_nsect;
603 desc[6] = tf->hob_lbal;
604 desc[8] = tf->hob_lbam;
605 desc[10] = tf->hob_lbah;
606 }
607}
608
609/**
610 * ata_gen_fixed_sense - generate a SCSI fixed sense block
611 * @qc: Command that we are erroring out
612 *
613 * Leverage ata_to_sense_error() to give us the codes. Fit our
614 * LBA in here if there's room.
615 *
616 * LOCKING:
617 * inherited from caller
618 */
619void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
620{
621 struct scsi_cmnd *cmd = qc->scsicmd;
622 struct ata_taskfile *tf = &qc->result_tf;
623 unsigned char *sb = cmd->sense_buffer;
624 int verbose = qc->ap->ops->error_handler == NULL;
625
626 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
627
628 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
629
630 /*
631 * Use ata_to_sense_error() to map status register bits
632 * onto sense key, asc & ascq.
633 */
634 if (qc->err_mask ||
635 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
636 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
637 &sb[2], &sb[12], &sb[13], verbose);
638 sb[2] &= 0x0f;
639 }
640
641 sb[0] = 0x70;
642 sb[7] = 0x0a;
643
644 if (tf->flags & ATA_TFLAG_LBA48) {
645 /* TODO: find solution for LBA48 descriptors */
646 }
647
648 else if (tf->flags & ATA_TFLAG_LBA) {
649 /* A small (28b) LBA will fit in the 32b info field */
650 sb[0] |= 0x80; /* set valid bit */
651 sb[3] = tf->device & 0x0f;
652 sb[4] = tf->lbah;
653 sb[5] = tf->lbam;
654 sb[6] = tf->lbal;
655 }
656
657 else {
658 /* TODO: C/H/S */
659 }
660}
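/*
 * Worked example (illustrative): a media error at 28-bit LBA
 * 0x00012345 produces sb[0] = 0xf0 (fixed format with the valid bit
 * set), SK/ASC/ASCQ in sb[2]/sb[12]/sb[13], and the LBA in the
 * information field: sb[3..6] = 00 01 23 45.
 */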
661
662static void ata_scsi_sdev_config(struct scsi_device *sdev)
663{
664 sdev->use_10_for_rw = 1;
665 sdev->use_10_for_ms = 1;
666}
667
668static void ata_scsi_dev_config(struct scsi_device *sdev,
669 struct ata_device *dev)
670{
671 unsigned int max_sectors;
672
673 /* TODO: 2048 is an arbitrary number, not the
674 * hardware maximum. This should be increased to
675 * 65534 when Jens Axboe's patch for dynamically
676 * determining max_sectors is merged.
677 */
678 max_sectors = ATA_MAX_SECTORS;
679 if (dev->flags & ATA_DFLAG_LBA48)
680 max_sectors = ATA_MAX_SECTORS_LBA48;
681 if (dev->max_sectors)
682 max_sectors = dev->max_sectors;
683
684 blk_queue_max_sectors(sdev->request_queue, max_sectors);
685
686 /*
 687	 * SATA DMA transfers must be multiples of 4 bytes, so
688 * we need to pad ATAPI transfers using an extra sg.
689 * Decrement max hw segments accordingly.
690 */
691 if (dev->class == ATA_DEV_ATAPI) {
692 request_queue_t *q = sdev->request_queue;
693 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
694 }
695
696 if (dev->flags & ATA_DFLAG_NCQ) {
697 int depth;
698
699 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
700 depth = min(ATA_MAX_QUEUE - 1, depth);
701 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
702 }
703}
704
705/**
706 * ata_scsi_slave_config - Set SCSI device attributes
707 * @sdev: SCSI device to examine
708 *
709 * This is called before we actually start reading
710 * and writing to the device, to configure certain
711 * SCSI mid-layer behaviors.
712 *
713 * LOCKING:
714 * Defined by SCSI layer. We don't really care.
715 */
716
717int ata_scsi_slave_config(struct scsi_device *sdev)
718{
719 struct ata_port *ap = ata_shost_to_port(sdev->host);
720 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
721
722 ata_scsi_sdev_config(sdev);
723
724 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
725
726 if (dev)
727 ata_scsi_dev_config(sdev, dev);
728
729 return 0; /* scsi layer doesn't check return value, sigh */
730}
731
732/**
733 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
734 * @sdev: SCSI device to be destroyed
735 *
736 * @sdev is about to be destroyed for hot/warm unplugging. If
737 * this unplugging was initiated by libata as indicated by NULL
738 * dev->sdev, this function doesn't have to do anything.
739 * Otherwise, SCSI layer initiated warm-unplug is in progress.
740 * Clear dev->sdev, schedule the device for ATA detach and invoke
741 * EH.
742 *
743 * LOCKING:
744 * Defined by SCSI layer. We don't really care.
745 */
746void ata_scsi_slave_destroy(struct scsi_device *sdev)
747{
748 struct ata_port *ap = ata_shost_to_port(sdev->host);
749 unsigned long flags;
750 struct ata_device *dev;
751
752 if (!ap->ops->error_handler)
753 return;
754
755 spin_lock_irqsave(ap->lock, flags);
756 dev = __ata_scsi_find_dev(ap, sdev);
757 if (dev && dev->sdev) {
758 /* SCSI device already in CANCEL state, no need to offline it */
759 dev->sdev = NULL;
760 dev->flags |= ATA_DFLAG_DETACH;
761 ata_port_schedule_eh(ap);
762 }
763 spin_unlock_irqrestore(ap->lock, flags);
764}
765
766/**
767 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
768 * @sdev: SCSI device to configure queue depth for
769 * @queue_depth: new queue depth
770 *
771 * This is libata standard hostt->change_queue_depth callback.
772 * SCSI will call into this callback when user tries to set queue
773 * depth via sysfs.
774 *
775 * LOCKING:
776 * SCSI layer (we don't care)
777 *
778 * RETURNS:
779 * Newly configured queue depth.
780 */
781int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
782{
783 struct ata_port *ap = ata_shost_to_port(sdev->host);
784 struct ata_device *dev;
785 int max_depth;
786
787 if (queue_depth < 1)
788 return sdev->queue_depth;
789
790 dev = ata_scsi_find_dev(ap, sdev);
791 if (!dev || !ata_dev_enabled(dev))
792 return sdev->queue_depth;
793
794 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
795 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
796 if (queue_depth > max_depth)
797 queue_depth = max_depth;
798
799 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
800 return queue_depth;
801}
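/*
 * Worked example (illustrative, assuming ATA_MAX_QUEUE == 32): with
 * host->can_queue == 31 and an IDENTIFY queue depth of 32, max_depth
 * is min(31, 32) = 31, further clamped to ATA_MAX_QUEUE - 1 = 31, so
 * a sysfs request for depth 64 is trimmed to 31.
 */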
802
803/**
804 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
805 * @qc: Storage for translated ATA taskfile
806 * @scsicmd: SCSI command to translate
807 *
808 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
809 * (to start). Perhaps these commands should be preceded by
810 * CHECK POWER MODE to see what power mode the device is already in.
811 * [See SAT revision 5 at www.t10.org]
812 *
813 * LOCKING:
814 * spin_lock_irqsave(host_set lock)
815 *
816 * RETURNS:
817 * Zero on success, non-zero on error.
818 */
819
820static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
821 const u8 *scsicmd)
822{
823 struct ata_taskfile *tf = &qc->tf;
824
825 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
826 tf->protocol = ATA_PROT_NODATA;
827 if (scsicmd[1] & 0x1) {
828 ; /* ignore IMMED bit, violates sat-r05 */
829 }
830 if (scsicmd[4] & 0x2)
831 goto invalid_fld; /* LOEJ bit set not supported */
832 if (((scsicmd[4] >> 4) & 0xf) != 0)
833 goto invalid_fld; /* power conditions not supported */
834 if (scsicmd[4] & 0x1) {
835 tf->nsect = 1; /* 1 sector, lba=0 */
836
837 if (qc->dev->flags & ATA_DFLAG_LBA) {
838 tf->flags |= ATA_TFLAG_LBA;
839
840 tf->lbah = 0x0;
841 tf->lbam = 0x0;
842 tf->lbal = 0x0;
843 tf->device |= ATA_LBA;
844 } else {
845 /* CHS */
846 tf->lbal = 0x1; /* sect */
847 tf->lbam = 0x0; /* cyl low */
848 tf->lbah = 0x0; /* cyl high */
849 }
850
851 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
852 } else {
853 tf->nsect = 0; /* time period value (0 implies now) */
854 tf->command = ATA_CMD_STANDBY;
855 /* Consider: ATA STANDBY IMMEDIATE command */
856 }
857 /*
858 * Standby and Idle condition timers could be implemented but that
859 * would require libata to implement the Power condition mode page
860 * and allow the user to change it. Changing mode pages requires
861 * MODE SELECT to be implemented.
862 */
863
864 return 0;
865
866invalid_fld:
867 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
868 /* "Invalid field in cbd" */
869 return 1;
870}
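/*
 * Worked example (illustrative): START STOP UNIT with byte 4 == 0x01
 * (START=1) on an LBA device is translated to READ VERIFY of one
 * sector at LBA 0; with byte 4 == 0x00 (stop) it is translated to
 * STANDBY with a timer value of 0.
 */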
871
872
873/**
874 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
875 * @qc: Storage for translated ATA taskfile
876 * @scsicmd: SCSI command to translate (ignored)
877 *
878 * Sets up an ATA taskfile to issue FLUSH CACHE or
879 * FLUSH CACHE EXT.
880 *
881 * LOCKING:
882 * spin_lock_irqsave(host_set lock)
883 *
884 * RETURNS:
885 * Zero on success, non-zero on error.
886 */
887
888static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
889{
890 struct ata_taskfile *tf = &qc->tf;
891
892 tf->flags |= ATA_TFLAG_DEVICE;
893 tf->protocol = ATA_PROT_NODATA;
894
895 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
896 (ata_id_has_flush_ext(qc->dev->id)))
897 tf->command = ATA_CMD_FLUSH_EXT;
898 else
899 tf->command = ATA_CMD_FLUSH;
900
901 return 0;
902}
903
904/**
905 * scsi_6_lba_len - Get LBA and transfer length
906 * @scsicmd: SCSI command to translate
907 *
908 * Calculate LBA and transfer length for 6-byte commands.
909 *
910 * RETURNS:
911 * @plba: the LBA
912 * @plen: the transfer length
913 */
914
915static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
916{
917 u64 lba = 0;
918 u32 len = 0;
919
920 VPRINTK("six-byte command\n");
921
922 lba |= ((u64)scsicmd[2]) << 8;
923 lba |= ((u64)scsicmd[3]);
924
925 len |= ((u32)scsicmd[4]);
926
927 *plba = lba;
928 *plen = len;
929}
930
931/**
932 * scsi_10_lba_len - Get LBA and transfer length
933 * @scsicmd: SCSI command to translate
934 *
935 * Calculate LBA and transfer length for 10-byte commands.
936 *
937 * RETURNS:
938 * @plba: the LBA
939 * @plen: the transfer length
940 */
941
942static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
943{
944 u64 lba = 0;
945 u32 len = 0;
946
947 VPRINTK("ten-byte command\n");
948
949 lba |= ((u64)scsicmd[2]) << 24;
950 lba |= ((u64)scsicmd[3]) << 16;
951 lba |= ((u64)scsicmd[4]) << 8;
952 lba |= ((u64)scsicmd[5]);
953
954 len |= ((u32)scsicmd[7]) << 8;
955 len |= ((u32)scsicmd[8]);
956
957 *plba = lba;
958 *plen = len;
959}
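/*
 * Worked example (illustrative): for the READ(10) CDB
 * 28 00 00 12 34 56 00 00 08 00, bytes 2-5 give
 * *plba = 0x00123456 (1193046) and bytes 7-8 give *plen = 8.
 */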
960
961/**
962 * scsi_16_lba_len - Get LBA and transfer length
963 * @scsicmd: SCSI command to translate
964 *
965 * Calculate LBA and transfer length for 16-byte commands.
966 *
967 * RETURNS:
968 * @plba: the LBA
969 * @plen: the transfer length
970 */
971
972static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
973{
974 u64 lba = 0;
975 u32 len = 0;
976
977 VPRINTK("sixteen-byte command\n");
978
979 lba |= ((u64)scsicmd[2]) << 56;
980 lba |= ((u64)scsicmd[3]) << 48;
981 lba |= ((u64)scsicmd[4]) << 40;
982 lba |= ((u64)scsicmd[5]) << 32;
983 lba |= ((u64)scsicmd[6]) << 24;
984 lba |= ((u64)scsicmd[7]) << 16;
985 lba |= ((u64)scsicmd[8]) << 8;
986 lba |= ((u64)scsicmd[9]);
987
988 len |= ((u32)scsicmd[10]) << 24;
989 len |= ((u32)scsicmd[11]) << 16;
990 len |= ((u32)scsicmd[12]) << 8;
991 len |= ((u32)scsicmd[13]);
992
993 *plba = lba;
994 *plen = len;
995}
996
997/**
998 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
999 * @qc: Storage for translated ATA taskfile
1000 * @scsicmd: SCSI command to translate
1001 *
1002 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
1003 *
1004 * LOCKING:
1005 * spin_lock_irqsave(host_set lock)
1006 *
1007 * RETURNS:
1008 * Zero on success, non-zero on error.
1009 */
1010
1011static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1012{
1013 struct ata_taskfile *tf = &qc->tf;
1014 struct ata_device *dev = qc->dev;
1015 u64 dev_sectors = qc->dev->n_sectors;
1016 u64 block;
1017 u32 n_block;
1018
1019 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1020 tf->protocol = ATA_PROT_NODATA;
1021
1022 if (scsicmd[0] == VERIFY)
1023 scsi_10_lba_len(scsicmd, &block, &n_block);
1024 else if (scsicmd[0] == VERIFY_16)
1025 scsi_16_lba_len(scsicmd, &block, &n_block);
1026 else
1027 goto invalid_fld;
1028
1029 if (!n_block)
1030 goto nothing_to_do;
1031 if (block >= dev_sectors)
1032 goto out_of_range;
1033 if ((block + n_block) > dev_sectors)
1034 goto out_of_range;
1035
1036 if (dev->flags & ATA_DFLAG_LBA) {
1037 tf->flags |= ATA_TFLAG_LBA;
1038
1039 if (lba_28_ok(block, n_block)) {
1040 /* use LBA28 */
1041 tf->command = ATA_CMD_VERIFY;
1042 tf->device |= (block >> 24) & 0xf;
1043 } else if (lba_48_ok(block, n_block)) {
1044 if (!(dev->flags & ATA_DFLAG_LBA48))
1045 goto out_of_range;
1046
1047 /* use LBA48 */
1048 tf->flags |= ATA_TFLAG_LBA48;
1049 tf->command = ATA_CMD_VERIFY_EXT;
1050
1051 tf->hob_nsect = (n_block >> 8) & 0xff;
1052
1053 tf->hob_lbah = (block >> 40) & 0xff;
1054 tf->hob_lbam = (block >> 32) & 0xff;
1055 tf->hob_lbal = (block >> 24) & 0xff;
1056 } else
1057 /* request too large even for LBA48 */
1058 goto out_of_range;
1059
1060 tf->nsect = n_block & 0xff;
1061
1062 tf->lbah = (block >> 16) & 0xff;
1063 tf->lbam = (block >> 8) & 0xff;
1064 tf->lbal = block & 0xff;
1065
1066 tf->device |= ATA_LBA;
1067 } else {
1068 /* CHS */
1069 u32 sect, head, cyl, track;
1070
1071 if (!lba_28_ok(block, n_block))
1072 goto out_of_range;
1073
1074 /* Convert LBA to CHS */
1075 track = (u32)block / dev->sectors;
1076 cyl = track / dev->heads;
1077 head = track % dev->heads;
1078 sect = (u32)block % dev->sectors + 1;
1079
1080 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1081 (u32)block, track, cyl, head, sect);
1082
1083 /* Check whether the converted CHS can fit.
1084 Cylinder: 0-65535
1085 Head: 0-15
1086 Sector: 1-255*/
1087 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1088 goto out_of_range;
1089
1090 tf->command = ATA_CMD_VERIFY;
1091 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1092 tf->lbal = sect;
1093 tf->lbam = cyl;
1094 tf->lbah = cyl >> 8;
1095 tf->device |= head;
1096 }
1097
1098 return 0;
1099
1100invalid_fld:
1101 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1102 /* "Invalid field in cbd" */
1103 return 1;
1104
1105out_of_range:
1106 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1107 /* "Logical Block Address out of range" */
1108 return 1;
1109
1110nothing_to_do:
1111 qc->scsicmd->result = SAM_STAT_GOOD;
1112 return 1;
1113}
1114
1115/**
1116 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1117 * @qc: Storage for translated ATA taskfile
1118 * @scsicmd: SCSI command to translate
1119 *
1120 * Converts any of six SCSI read/write commands into the
1121 * ATA counterpart, including starting sector (LBA),
1122 * sector count, and taking into account the device's LBA48
1123 * support.
1124 *
1125 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
1126 * %WRITE_16 are currently supported.
1127 *
1128 * LOCKING:
1129 * spin_lock_irqsave(host_set lock)
1130 *
1131 * RETURNS:
1132 * Zero on success, non-zero on error.
1133 */
1134
1135static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1136{
1137 struct ata_taskfile *tf = &qc->tf;
1138 struct ata_device *dev = qc->dev;
1139 u64 block;
1140 u32 n_block;
1141
1142 qc->flags |= ATA_QCFLAG_IO;
1143 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1144
1145 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1146 scsicmd[0] == WRITE_16)
1147 tf->flags |= ATA_TFLAG_WRITE;
1148
1149 /* Calculate the SCSI LBA, transfer length and FUA. */
1150 switch (scsicmd[0]) {
1151 case READ_10:
1152 case WRITE_10:
1153 scsi_10_lba_len(scsicmd, &block, &n_block);
1154 if (unlikely(scsicmd[1] & (1 << 3)))
1155 tf->flags |= ATA_TFLAG_FUA;
1156 break;
1157 case READ_6:
1158 case WRITE_6:
1159 scsi_6_lba_len(scsicmd, &block, &n_block);
1160
1161 /* for 6-byte r/w commands, transfer length 0
1162 * means 256 blocks of data, not 0 block.
1163 */
1164 if (!n_block)
1165 n_block = 256;
1166 break;
1167 case READ_16:
1168 case WRITE_16:
1169 scsi_16_lba_len(scsicmd, &block, &n_block);
1170 if (unlikely(scsicmd[1] & (1 << 3)))
1171 tf->flags |= ATA_TFLAG_FUA;
1172 break;
1173 default:
1174 DPRINTK("no-byte command\n");
1175 goto invalid_fld;
1176 }
1177
1178 /* Check and compose ATA command */
1179 if (!n_block)
1180 /* For 10-byte and 16-byte SCSI R/W commands, transfer
1181 * length 0 means transfer 0 block of data.
1182 * However, for ATA R/W commands, sector count 0 means
1183 * 256 or 65536 sectors, not 0 sectors as in SCSI.
1184 *
1185 * WARNING: one or two older ATA drives treat 0 as 0...
1186 */
1187 goto nothing_to_do;
1188
1189 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1190 /* yay, NCQ */
1191 if (!lba_48_ok(block, n_block))
1192 goto out_of_range;
1193
1194 tf->protocol = ATA_PROT_NCQ;
1195 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1196
1197 if (tf->flags & ATA_TFLAG_WRITE)
1198 tf->command = ATA_CMD_FPDMA_WRITE;
1199 else
1200 tf->command = ATA_CMD_FPDMA_READ;
1201
1202 qc->nsect = n_block;
1203
1204 tf->nsect = qc->tag << 3;
1205 tf->hob_feature = (n_block >> 8) & 0xff;
1206 tf->feature = n_block & 0xff;
1207
1208 tf->hob_lbah = (block >> 40) & 0xff;
1209 tf->hob_lbam = (block >> 32) & 0xff;
1210 tf->hob_lbal = (block >> 24) & 0xff;
1211 tf->lbah = (block >> 16) & 0xff;
1212 tf->lbam = (block >> 8) & 0xff;
1213 tf->lbal = block & 0xff;
1214
 1215		tf->device = 1 << 6; /* LBA bit */
1216 if (tf->flags & ATA_TFLAG_FUA)
1217 tf->device |= 1 << 7;
1218 } else if (dev->flags & ATA_DFLAG_LBA) {
1219 tf->flags |= ATA_TFLAG_LBA;
1220
1221 if (lba_28_ok(block, n_block)) {
1222 /* use LBA28 */
1223 tf->device |= (block >> 24) & 0xf;
1224 } else if (lba_48_ok(block, n_block)) {
1225 if (!(dev->flags & ATA_DFLAG_LBA48))
1226 goto out_of_range;
1227
1228 /* use LBA48 */
1229 tf->flags |= ATA_TFLAG_LBA48;
1230
1231 tf->hob_nsect = (n_block >> 8) & 0xff;
1232
1233 tf->hob_lbah = (block >> 40) & 0xff;
1234 tf->hob_lbam = (block >> 32) & 0xff;
1235 tf->hob_lbal = (block >> 24) & 0xff;
1236 } else
1237 /* request too large even for LBA48 */
1238 goto out_of_range;
1239
1240 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1241 goto invalid_fld;
1242
1243 qc->nsect = n_block;
1244 tf->nsect = n_block & 0xff;
1245
1246 tf->lbah = (block >> 16) & 0xff;
1247 tf->lbam = (block >> 8) & 0xff;
1248 tf->lbal = block & 0xff;
1249
1250 tf->device |= ATA_LBA;
1251 } else {
1252 /* CHS */
1253 u32 sect, head, cyl, track;
1254
1255 /* The request -may- be too large for CHS addressing. */
1256 if (!lba_28_ok(block, n_block))
1257 goto out_of_range;
1258
1259 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1260 goto invalid_fld;
1261
1262 /* Convert LBA to CHS */
1263 track = (u32)block / dev->sectors;
1264 cyl = track / dev->heads;
1265 head = track % dev->heads;
1266 sect = (u32)block % dev->sectors + 1;
1267
1268 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1269 (u32)block, track, cyl, head, sect);
1270
1271 /* Check whether the converted CHS can fit.
1272 Cylinder: 0-65535
1273 Head: 0-15
1274 Sector: 1-255*/
1275 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1276 goto out_of_range;
1277
1278 qc->nsect = n_block;
1279 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1280 tf->lbal = sect;
1281 tf->lbam = cyl;
1282 tf->lbah = cyl >> 8;
1283 tf->device |= head;
1284 }
1285
1286 return 0;
1287
1288invalid_fld:
1289 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1290 /* "Invalid field in cbd" */
1291 return 1;
1292
1293out_of_range:
1294 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1295 /* "Logical Block Address out of range" */
1296 return 1;
1297
1298nothing_to_do:
1299 qc->scsicmd->result = SAM_STAT_GOOD;
1300 return 1;
1301}
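/*
 * Worked example (illustrative): READ(10) of 8 sectors at LBA
 * 0x123456 on an NCQ device with qc->tag == 5 yields
 * ATA_CMD_FPDMA_READ with feature = 8 (sector count), nsect = 0x28
 * (tag 5 in bits 7:3), lbal/lbam/lbah = 56h/34h/12h and
 * device = 0x40 (LBA bit).
 */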
1302
1303static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1304{
1305 struct scsi_cmnd *cmd = qc->scsicmd;
1306 u8 *cdb = cmd->cmnd;
1307 int need_sense = (qc->err_mask != 0);
1308
1309 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
1310 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
 1311	 * cache.
1312 */
1313 if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
1314 ((qc->tf.feature == SETFEATURES_WC_ON) ||
1315 (qc->tf.feature == SETFEATURES_WC_OFF))) {
1316 qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
1317 ata_port_schedule_eh(qc->ap);
1318 }
1319
1320 /* For ATA pass thru (SAT) commands, generate a sense block if
1321 * user mandated it or if there's an error. Note that if we
1322 * generate because the user forced us to, a check condition
1323 * is generated and the ATA register values are returned
1324 * whether the command completed successfully or not. If there
1325 * was no error, SK, ASC and ASCQ will all be zero.
1326 */
1327 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1328 ((cdb[2] & 0x20) || need_sense)) {
1329 ata_gen_ata_desc_sense(qc);
1330 } else {
1331 if (!need_sense) {
1332 cmd->result = SAM_STAT_GOOD;
1333 } else {
1334 /* TODO: decide which descriptor format to use
1335 * for 48b LBA devices and call that here
1336 * instead of the fixed desc, which is only
1337 * good for smaller LBA (and maybe CHS?)
1338 * devices.
1339 */
1340 ata_gen_fixed_sense(qc);
1341 }
1342 }
1343
1344 if (need_sense && !qc->ap->ops->error_handler)
1345 ata_dump_status(qc->ap->id, &qc->result_tf);
1346
1347 qc->scsidone(cmd);
1348
1349 ata_qc_free(qc);
1350}
1351
1352/**
1353 * ata_scmd_need_defer - Check whether we need to defer scmd
1354 * @dev: ATA device to which the command is addressed
1355 * @is_io: Is the command IO (and thus possibly NCQ)?
1356 *
1357 * NCQ and non-NCQ commands cannot run together. As upper layer
1358 * only knows the queue depth, we are responsible for maintaining
1359 * exclusion. This function checks whether a new command can be
1360 * issued to @dev.
1361 *
1362 * LOCKING:
1363 * spin_lock_irqsave(host_set lock)
1364 *
1365 * RETURNS:
1366 * 1 if deferring is needed, 0 otherwise.
1367 */
1368static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1369{
1370 struct ata_port *ap = dev->ap;
1371
1372 if (!(dev->flags & ATA_DFLAG_NCQ))
1373 return 0;
1374
1375 if (is_io) {
1376 if (!ata_tag_valid(ap->active_tag))
1377 return 0;
1378 } else {
1379 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1380 return 0;
1381 }
1382 return 1;
1383}
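/*
 * Worked example (illustrative): on an NCQ-capable device, a new
 * read is deferred only while a non-NCQ command owns active_tag; a
 * non-IO command (e.g. a cache flush) is deferred until both
 * active_tag and sactive are clear, i.e. the port is fully idle.
 */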
1384
1385/**
1386 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1387 * @dev: ATA device to which the command is addressed
1388 * @cmd: SCSI command to execute
1389 * @done: SCSI command completion function
1390 * @xlat_func: Actor which translates @cmd to an ATA taskfile
1391 *
1392 * Our ->queuecommand() function has decided that the SCSI
1393 * command issued can be directly translated into an ATA
1394 * command, rather than handled internally.
1395 *
1396 * This function sets up an ata_queued_cmd structure for the
1397 * SCSI command, and sends that ata_queued_cmd to the hardware.
1398 *
1399 * The xlat_func argument (actor) returns 0 if ready to execute
1400 * ATA command, else 1 to finish translation. If 1 is returned
1401 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1402 * to be set reflecting an error condition or clean (early)
1403 * termination.
1404 *
1405 * LOCKING:
1406 * spin_lock_irqsave(host_set lock)
1407 *
1408 * RETURNS:
 1409 *	0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
1410 * needs to be deferred.
1411 */
1412static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1413 void (*done)(struct scsi_cmnd *),
1414 ata_xlat_func_t xlat_func)
1415{
1416 struct ata_queued_cmd *qc;
1417 u8 *scsicmd = cmd->cmnd;
1418 int is_io = xlat_func == ata_scsi_rw_xlat;
1419
1420 VPRINTK("ENTER\n");
1421
1422 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1423 goto defer;
1424
1425 qc = ata_scsi_qc_new(dev, cmd, done);
1426 if (!qc)
1427 goto err_mem;
1428
1429 /* data is present; dma-map it */
1430 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1431 cmd->sc_data_direction == DMA_TO_DEVICE) {
1432 if (unlikely(cmd->request_bufflen < 1)) {
1433 ata_dev_printk(dev, KERN_WARNING,
1434 "WARNING: zero len r/w req\n");
1435 goto err_did;
1436 }
1437
1438 if (cmd->use_sg)
1439 ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
1440 else
1441 ata_sg_init_one(qc, cmd->request_buffer,
1442 cmd->request_bufflen);
1443
1444 qc->dma_dir = cmd->sc_data_direction;
1445 }
1446
1447 qc->complete_fn = ata_scsi_qc_complete;
1448
1449 if (xlat_func(qc, scsicmd))
1450 goto early_finish;
1451
1452 /* select device, send command to hardware */
1453 ata_qc_issue(qc);
1454
1455 VPRINTK("EXIT\n");
1456 return 0;
1457
1458early_finish:
1459 ata_qc_free(qc);
1460 done(cmd);
1461 DPRINTK("EXIT - early finish (good or error)\n");
1462 return 0;
1463
1464err_did:
1465 ata_qc_free(qc);
1466err_mem:
1467 cmd->result = (DID_ERROR << 16);
1468 done(cmd);
1469 DPRINTK("EXIT - internal\n");
1470 return 0;
1471
1472defer:
1473 DPRINTK("EXIT - defer\n");
1474 return SCSI_MLQUEUE_DEVICE_BUSY;
1475}
1476
1477/**
1478 * ata_scsi_rbuf_get - Map response buffer.
1479 * @cmd: SCSI command containing buffer to be mapped.
1480 * @buf_out: Pointer to mapped area.
1481 *
1482 * Maps buffer contained within SCSI command @cmd.
1483 *
1484 * LOCKING:
1485 * spin_lock_irqsave(host_set lock)
1486 *
1487 * RETURNS:
1488 * Length of response buffer.
1489 */
1490
1491static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1492{
1493 u8 *buf;
1494 unsigned int buflen;
1495
1496 if (cmd->use_sg) {
1497 struct scatterlist *sg;
1498
1499 sg = (struct scatterlist *) cmd->request_buffer;
1500 buf = kmap_atomic(sg->page, KM_USER0) + sg->offset;
1501 buflen = sg->length;
1502 } else {
1503 buf = cmd->request_buffer;
1504 buflen = cmd->request_bufflen;
1505 }
1506
1507 *buf_out = buf;
1508 return buflen;
1509}
1510
1511/**
1512 * ata_scsi_rbuf_put - Unmap response buffer.
1513 * @cmd: SCSI command containing buffer to be unmapped.
1514 * @buf: buffer to unmap
1515 *
1516 * Unmaps response buffer contained within @cmd.
1517 *
1518 * LOCKING:
1519 * spin_lock_irqsave(host_set lock)
1520 */
1521
1522static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1523{
1524 if (cmd->use_sg) {
1525 struct scatterlist *sg;
1526
1527 sg = (struct scatterlist *) cmd->request_buffer;
1528 kunmap_atomic(buf - sg->offset, KM_USER0);
1529 }
1530}
1531
1532/**
1533 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1534 * @args: device IDENTIFY data / SCSI command of interest.
1535 * @actor: Callback hook for desired SCSI command simulator
1536 *
1537 * Takes care of the hard work of simulating a SCSI command...
1538 * Mapping the response buffer, calling the command's handler,
1539 * and handling the handler's return value. This return value
1540 * indicates whether the handler wishes the SCSI command to be
1541 * completed successfully (0), or not (in which case cmd->result
1542 * and sense buffer are assumed to be set).
1543 *
1544 * LOCKING:
1545 * spin_lock_irqsave(host_set lock)
1546 */
1547
1548void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1549 unsigned int (*actor) (struct ata_scsi_args *args,
1550 u8 *rbuf, unsigned int buflen))
1551{
1552 u8 *rbuf;
1553 unsigned int buflen, rc;
1554 struct scsi_cmnd *cmd = args->cmd;
1555
1556 buflen = ata_scsi_rbuf_get(cmd, &rbuf);
1557 memset(rbuf, 0, buflen);
1558 rc = actor(args, rbuf, buflen);
1559 ata_scsi_rbuf_put(cmd, rbuf);
1560
1561 if (rc == 0)
1562 cmd->result = SAM_STAT_GOOD;
1563 args->done(cmd);
1564}
1565
1566/**
1567 * ata_scsiop_inq_std - Simulate INQUIRY command
1568 * @args: device IDENTIFY data / SCSI command of interest.
1569 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1570 * @buflen: Response buffer length.
1571 *
1572 * Returns standard device identification data associated
1573 * with non-VPD INQUIRY command output.
1574 *
1575 * LOCKING:
1576 * spin_lock_irqsave(host_set lock)
1577 */
1578
1579unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1580 unsigned int buflen)
1581{
1582 u8 hdr[] = {
1583 TYPE_DISK,
1584 0,
1585 0x5, /* claim SPC-3 version compatibility */
1586 2,
1587 95 - 4
1588 };
1589
1590 /* set scsi removeable (RMB) bit per ata bit */
1591 if (ata_id_removeable(args->id))
1592 hdr[1] |= (1 << 7);
1593
1594 VPRINTK("ENTER\n");
1595
1596 memcpy(rbuf, hdr, sizeof(hdr));
1597
1598 if (buflen > 35) {
1599 memcpy(&rbuf[8], "ATA ", 8);
1600 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1601 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1602 if (rbuf[32] == 0 || rbuf[32] == ' ')
1603 memcpy(&rbuf[32], "n/a ", 4);
1604 }
1605
1606 if (buflen > 63) {
1607 const u8 versions[] = {
1608 0x60, /* SAM-3 (no version claimed) */
1609
1610 0x03,
1611 0x20, /* SBC-2 (no version claimed) */
1612
1613 0x02,
1614 0x60 /* SPC-3 (no version claimed) */
1615 };
1616
1617 memcpy(rbuf + 59, versions, sizeof(versions));
1618 }
1619
1620 return 0;
1621}
1622
1623/**
1624 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1625 * @args: device IDENTIFY data / SCSI command of interest.
1626 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1627 * @buflen: Response buffer length.
1628 *
1629 * Returns list of inquiry VPD pages available.
1630 *
1631 * LOCKING:
1632 * spin_lock_irqsave(host_set lock)
1633 */
1634
1635unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1636 unsigned int buflen)
1637{
1638 const u8 pages[] = {
1639 0x00, /* page 0x00, this page */
1640 0x80, /* page 0x80, unit serial no page */
1641 0x83 /* page 0x83, device ident page */
1642 };
1643 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1644
1645 if (buflen > 6)
1646 memcpy(rbuf + 4, pages, sizeof(pages));
1647
1648 return 0;
1649}
1650
1651/**
1652 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1653 * @args: device IDENTIFY data / SCSI command of interest.
1654 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1655 * @buflen: Response buffer length.
1656 *
1657 * Returns ATA device serial number.
1658 *
1659 * LOCKING:
1660 * spin_lock_irqsave(host_set lock)
1661 */
1662
1663unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1664 unsigned int buflen)
1665{
1666 const u8 hdr[] = {
1667 0,
1668 0x80, /* this page code */
1669 0,
1670 ATA_SERNO_LEN, /* page len */
1671 };
1672 memcpy(rbuf, hdr, sizeof(hdr));
1673
1674 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1675 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1676 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1677
1678 return 0;
1679}
1680
1681/**
1682 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1683 * @args: device IDENTIFY data / SCSI command of interest.
1684 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1685 * @buflen: Response buffer length.
1686 *
1687 * Yields two logical unit device identification designators:
1688 * - vendor specific ASCII containing the ATA serial number
1689 * - SAT defined "t10 vendor id based" containing ASCII vendor
1690 * name ("ATA "), model and serial numbers.
1691 *
1692 * LOCKING:
1693 * spin_lock_irqsave(host_set lock)
1694 */
1695
1696unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1697 unsigned int buflen)
1698{
1699 int num;
1700 const int sat_model_serial_desc_len = 68;
1701 const int ata_model_byte_len = 40;
1702
1703 rbuf[1] = 0x83; /* this page code */
1704 num = 4;
1705
1706 if (buflen > (ATA_SERNO_LEN + num + 3)) {
 1707		/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
1708 rbuf[num + 0] = 2;
1709 rbuf[num + 3] = ATA_SERNO_LEN;
1710 num += 4;
1711 ata_id_string(args->id, (unsigned char *) rbuf + num,
1712 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1713 num += ATA_SERNO_LEN;
1714 }
1715 if (buflen > (sat_model_serial_desc_len + num + 3)) {
1716 /* SAT defined lu model and serial numbers descriptor */
 1717		/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
1718 rbuf[num + 0] = 2;
1719 rbuf[num + 1] = 1;
1720 rbuf[num + 3] = sat_model_serial_desc_len;
1721 num += 4;
1722 memcpy(rbuf + num, "ATA ", 8);
1723 num += 8;
1724 ata_id_string(args->id, (unsigned char *) rbuf + num,
1725 ATA_ID_PROD_OFS, ata_model_byte_len);
1726 num += ata_model_byte_len;
1727 ata_id_string(args->id, (unsigned char *) rbuf + num,
1728 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1729 num += ATA_SERNO_LEN;
1730 }
1731 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1732 return 0;
1733}
1734
1735/**
1736 * ata_scsiop_noop - Command handler that simply returns success.
1737 * @args: device IDENTIFY data / SCSI command of interest.
1738 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1739 * @buflen: Response buffer length.
1740 *
1741 * No operation. Simply returns success to caller, to indicate
1742 * that the caller should successfully complete this SCSI command.
1743 *
1744 * LOCKING:
1745 * spin_lock_irqsave(host_set lock)
1746 */
1747
1748unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
1749 unsigned int buflen)
1750{
1751 VPRINTK("ENTER\n");
1752 return 0;
1753}
1754
1755/**
1756 * ata_msense_push - Push data onto MODE SENSE data output buffer
1757 * @ptr_io: (input/output) Location to store more output data
1758 * @last: End of output data buffer
1759 * @buf: Pointer to BLOB being added to output buffer
1760 * @buflen: Length of BLOB
1761 *
1762 * Store MODE SENSE data on an output buffer.
1763 *
1764 * LOCKING:
1765 * None.
1766 */
1767
1768static void ata_msense_push(u8 **ptr_io, const u8 *last,
1769 const u8 *buf, unsigned int buflen)
1770{
1771 u8 *ptr = *ptr_io;
1772
1773 if ((ptr + buflen - 1) > last)
1774 return;
1775
1776 memcpy(ptr, buf, buflen);
1777
1778 ptr += buflen;
1779
1780 *ptr_io = ptr;
1781}
1782
1783/**
1784 * ata_msense_caching - Simulate MODE SENSE caching info page
1785 * @id: device IDENTIFY data
1786 * @ptr_io: (input/output) Location to store more output data
1787 * @last: End of output data buffer
1788 *
1789 * Generate a caching info page, which conditionally indicates
1790 * write caching to the SCSI layer, depending on device
1791 * capabilities.
1792 *
1793 * LOCKING:
1794 * None.
1795 */
1796
1797static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1798 const u8 *last)
1799{
1800 u8 page[CACHE_MPAGE_LEN];
1801
1802 memcpy(page, def_cache_mpage, sizeof(page));
1803 if (ata_id_wcache_enabled(id))
1804 page[2] |= (1 << 2); /* write cache enable */
1805 if (!ata_id_rahead_enabled(id))
1806 page[12] |= (1 << 5); /* disable read ahead */
1807
1808 ata_msense_push(ptr_io, last, page, sizeof(page));
1809 return sizeof(page);
1810}
1811
1812/**
1813 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
1815 * @ptr_io: (input/output) Location to store more output data
1816 * @last: End of output data buffer
1817 *
1818 * Generate a generic MODE SENSE control mode page.
1819 *
1820 * LOCKING:
1821 * None.
1822 */
1823
1824static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1825{
1826 ata_msense_push(ptr_io, last, def_control_mpage,
1827 sizeof(def_control_mpage));
1828 return sizeof(def_control_mpage);
1829}
1830
1831/**
1832 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
1834 * @ptr_io: (input/output) Location to store more output data
1835 * @last: End of output data buffer
1836 *
1837 * Generate a generic MODE SENSE r/w error recovery page.
1838 *
1839 * LOCKING:
1840 * None.
1841 */
1842
1843static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1844{
1846 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
1847 sizeof(def_rw_recovery_mpage));
1848 return sizeof(def_rw_recovery_mpage);
1849}
1850
1851/*
1852 * We can turn this into a real blacklist if it's needed, for now just
1853 * blacklist any Maxtor BANC1G10 revision firmware
1854 */
1855static int ata_dev_supports_fua(u16 *id)
1856{
1857 unsigned char model[41], fw[9];
1858
1859 if (!libata_fua)
1860 return 0;
1861 if (!ata_id_has_fua(id))
1862 return 0;
1863
1864 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1865 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1866
1867 if (strcmp(model, "Maxtor"))
1868 return 1;
1869 if (strcmp(fw, "BANC1G10"))
1870 return 1;
1871
1872 return 0; /* blacklisted */
1873}
1874
1875/**
1876 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
1877 * @args: device IDENTIFY data / SCSI command of interest.
1878 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1879 * @buflen: Response buffer length.
1880 *
1881 * Simulate MODE SENSE commands. Assume this is invoked for direct
1882 * access devices (e.g. disks) only. There should be no block
1883 * descriptor for other device types.
1884 *
1885 * LOCKING:
1886 * spin_lock_irqsave(host_set lock)
1887 */
1888
1889unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1890 unsigned int buflen)
1891{
1892 struct ata_device *dev = args->dev;
1893 u8 *scsicmd = args->cmd->cmnd, *p, *last;
1894 const u8 sat_blk_desc[] = {
1895 0, 0, 0, 0, /* number of blocks: sat unspecified */
1896 0,
1897 0, 0x2, 0x0 /* block length: 512 bytes */
1898 };
1899 u8 pg, spg;
1900 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
1901 u8 dpofua;
1902
1903 VPRINTK("ENTER\n");
1904
1905 six_byte = (scsicmd[0] == MODE_SENSE);
1906 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == ebd */
1907 /*
1908 * LLBA bit in msense(10) ignored (compliant)
1909 */
1910
1911 page_control = scsicmd[2] >> 6;
1912 switch (page_control) {
1913 case 0: /* current */
1914 break; /* supported */
1915 case 3: /* saved */
1916 goto saving_not_supp;
1917 case 1: /* changeable */
1918 case 2: /* defaults */
1919 default:
1920 goto invalid_fld;
1921 }
1922
1923 if (six_byte) {
1924 output_len = 4 + (ebd ? 8 : 0);
1925 alloc_len = scsicmd[4];
1926 } else {
1927 output_len = 8 + (ebd ? 8 : 0);
1928 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
1929 }
1930 minlen = (alloc_len < buflen) ? alloc_len : buflen;
1931
1932 p = rbuf + output_len;
1933 last = rbuf + minlen - 1;
1934
1935 pg = scsicmd[2] & 0x3f;
1936 spg = scsicmd[3];
1937 /*
1938 * No mode subpages supported (yet) but asking for _all_
1939 * subpages may be valid
1940 */
1941 if (spg && (spg != ALL_SUB_MPAGES))
1942 goto invalid_fld;
1943
1944 switch(pg) {
1945 case RW_RECOVERY_MPAGE:
1946 output_len += ata_msense_rw_recovery(&p, last);
1947 break;
1948
1949 case CACHE_MPAGE:
1950 output_len += ata_msense_caching(args->id, &p, last);
1951 break;
1952
1953 case CONTROL_MPAGE: {
1954 output_len += ata_msense_ctl_mode(&p, last);
1955 break;
1956 }
1957
1958 case ALL_MPAGES:
1959 output_len += ata_msense_rw_recovery(&p, last);
1960 output_len += ata_msense_caching(args->id, &p, last);
1961 output_len += ata_msense_ctl_mode(&p, last);
1962 break;
1963
1964 default: /* invalid page code */
1965 goto invalid_fld;
1966 }
1967
1968 if (minlen < 1)
1969 return 0;
1970
1971 dpofua = 0;
1972 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
1973 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
1974 dpofua = 1 << 4;
1975
1976 if (six_byte) {
1977 output_len--;
1978 rbuf[0] = output_len;
1979 if (minlen > 2)
1980 rbuf[2] |= dpofua;
1981 if (ebd) {
1982 if (minlen > 3)
1983 rbuf[3] = sizeof(sat_blk_desc);
1984 if (minlen > 11)
1985 memcpy(rbuf + 4, sat_blk_desc,
1986 sizeof(sat_blk_desc));
1987 }
1988 } else {
1989 output_len -= 2;
1990 rbuf[0] = output_len >> 8;
1991 if (minlen > 1)
1992 rbuf[1] = output_len;
1993 if (minlen > 3)
1994 rbuf[3] |= dpofua;
1995 if (ebd) {
1996 if (minlen > 7)
1997 rbuf[7] = sizeof(sat_blk_desc);
1998 if (minlen > 15)
1999 memcpy(rbuf + 8, sat_blk_desc,
2000 sizeof(sat_blk_desc));
2001 }
2002 }
2003 return 0;
2004
2005invalid_fld:
2006 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2007 /* "Invalid field in cbd" */
2008 return 1;
2009
2010saving_not_supp:
2011 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2012 /* "Saving parameters not supported" */
2013 return 1;
2014}
2015
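For reference, a sketch of how the six-byte reply built above lays out for a FUA-capable disk when a block descriptor is included; this is a paraphrase of the six_byte branch, not an extra requirement from the spec:

	struct msense6_reply_sketch {
		u8 mode_data_len;	/* rbuf[0]: output_len after the decrement,
					 * i.e. total length excluding this byte */
		u8 medium_type;		/* rbuf[1]: stays 0 (buffer pre-zeroed) */
		u8 dev_specific;	/* rbuf[2]: 0x10 when dpofua is advertised */
		u8 blk_desc_len;	/* rbuf[3]: sizeof(sat_blk_desc) == 8 */
		u8 blk_desc[8];		/* rbuf[4..11]: 512-byte block length */
		/* mode page data follows */
	};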
2016/**
2017 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2018 * @args: device IDENTIFY data / SCSI command of interest.
2019 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2020 * @buflen: Response buffer length.
2021 *
2022 * Simulate READ CAPACITY commands.
2023 *
2024 * LOCKING:
2025 * spin_lock_irqsave(host_set lock)
2026 */
2027
2028unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2029 unsigned int buflen)
2030{
2031 u64 n_sectors;
2032 u32 tmp;
2033
2034 VPRINTK("ENTER\n");
2035
2036 if (ata_id_has_lba(args->id)) {
2037 if (ata_id_has_lba48(args->id))
2038 n_sectors = ata_id_u64(args->id, 100);
2039 else
2040 n_sectors = ata_id_u32(args->id, 60);
2041 } else {
2042 /* CHS default translation */
2043 n_sectors = args->id[1] * args->id[3] * args->id[6];
2044
2045 if (ata_id_current_chs_valid(args->id))
2046 /* CHS current translation */
2047 n_sectors = ata_id_u32(args->id, 57);
2048 }
2049
2050 n_sectors--; /* ATA TotalUserSectors - 1 */
2051
2052 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2053 if (n_sectors >= 0xffffffffULL)
2054 tmp = 0xffffffff; /* Return max count on overflow */
2055 else
2056 tmp = n_sectors;
2057
2058 /* sector count, 32-bit */
2059 rbuf[0] = tmp >> (8 * 3);
2060 rbuf[1] = tmp >> (8 * 2);
2061 rbuf[2] = tmp >> (8 * 1);
2062 rbuf[3] = tmp;
2063
2064 /* sector size */
2065 tmp = ATA_SECT_SIZE;
2066 rbuf[6] = tmp >> 8;
2067 rbuf[7] = tmp;
2068
2069 } else {
2070 /* sector count, 64-bit */
2071 tmp = n_sectors >> (8 * 4);
2072 rbuf[2] = tmp >> (8 * 3);
2073 rbuf[3] = tmp >> (8 * 2);
2074 rbuf[4] = tmp >> (8 * 1);
2075 rbuf[5] = tmp;
2076 tmp = n_sectors;
2077 rbuf[6] = tmp >> (8 * 3);
2078 rbuf[7] = tmp >> (8 * 2);
2079 rbuf[8] = tmp >> (8 * 1);
2080 rbuf[9] = tmp;
2081
2082 /* sector size */
2083 tmp = ATA_SECT_SIZE;
2084 rbuf[12] = tmp >> 8;
2085 rbuf[13] = tmp;
2086 }
2087
2088 return 0;
2089}
2090
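A stand-alone restatement of the READ CAPACITY(10) packing above, with a worked value; `pack_readcap10` and `readcap_demo` are hypothetical helpers for illustration, not part of the driver:

	#include <assert.h>
	#include <string.h>

	typedef unsigned char u8;

	/* Re-statement of the 10-byte branch above (sketch). */
	static void pack_readcap10(u8 *rbuf, unsigned long long last_lba)
	{
		unsigned int tmp = last_lba >= 0xffffffffULL ? 0xffffffff : last_lba;

		rbuf[0] = tmp >> 24;		/* last LBA, big-endian */
		rbuf[1] = tmp >> 16;
		rbuf[2] = tmp >> 8;
		rbuf[3] = tmp;
		rbuf[6] = 512 >> 8;		/* ATA_SECT_SIZE, big-endian */
		rbuf[7] = 512 & 0xff;		/* rbuf[4..5] stay zero */
	}

	static void readcap_demo(void)
	{
		u8 rbuf[8];

		memset(rbuf, 0, sizeof(rbuf));
		pack_readcap10(rbuf, 0x12345678ULL);
		assert(rbuf[0] == 0x12 && rbuf[3] == 0x78);
		assert(rbuf[6] == 0x02 && rbuf[7] == 0x00);	/* 512-byte sectors */
	}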
2091/**
2092 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2093 * @args: device IDENTIFY data / SCSI command of interest.
2094 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2095 * @buflen: Response buffer length.
2096 *
2097 * Simulate REPORT LUNS command.
2098 *
2099 * LOCKING:
2100 * spin_lock_irqsave(host_set lock)
2101 */
2102
2103unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2104 unsigned int buflen)
2105{
2106 VPRINTK("ENTER\n");
2107 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
2108
2109 return 0;
2110}
2111
2112/**
2113 * ata_scsi_set_sense - Set SCSI sense data and status
2114 * @cmd: SCSI request to be handled
2115 * @sk: SCSI-defined sense key
2116 * @asc: SCSI-defined additional sense code
2117 * @ascq: SCSI-defined additional sense code qualifier
2118 *
2119 * Helper function that builds a valid fixed format, current
2120 * response code and the given sense key (sk), additional sense
2121 * code (asc) and additional sense code qualifier (ascq) with
2122 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
2123 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
2124 *
2125 * LOCKING:
2126 * Not required
2127 */
2128
2129void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2130{
2131 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2132
2133 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
2134 cmd->sense_buffer[2] = sk;
2135 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
2136 cmd->sense_buffer[12] = asc;
2137 cmd->sense_buffer[13] = ascq;
2138}
2139
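Worked example: the `invalid_fld` paths above call this with (ILLEGAL_REQUEST, 0x24, 0x0), and since ILLEGAL_REQUEST is 0x05 in SCSI, the resulting fixed-format sense bytes are:

	/*   sense_buffer[0]  = 0x70   current error, fixed format
	 *   sense_buffer[2]  = 0x05   sense key: ILLEGAL REQUEST
	 *   sense_buffer[7]  = 0x0a   additional sense length (18 - 8)
	 *   sense_buffer[12] = 0x24   ASC:  INVALID FIELD IN CDB
	 *   sense_buffer[13] = 0x00   ASCQ
	 */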
2140/**
2141 * ata_scsi_badcmd - End a SCSI request with an error
2142 * @cmd: SCSI request to be handled
2143 * @done: SCSI command completion function
2144 * @asc: SCSI-defined additional sense code
2145 * @ascq: SCSI-defined additional sense code qualifier
2146 *
2147 * Helper function that completes a SCSI command with
2148 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
2149 * and the specified additional sense codes.
2150 *
2151 * LOCKING:
2152 * spin_lock_irqsave(host_set lock)
2153 */
2154
2155void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
2156{
2157 DPRINTK("ENTER\n");
2158 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
2159
2160 done(cmd);
2161}
2162
2163static void atapi_sense_complete(struct ata_queued_cmd *qc)
2164{
2165 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2166 /* FIXME: not quite right; we don't want the
2167 * translation of taskfile registers into
2168 * sense descriptors, since that's only
2169 * correct for ATA, not ATAPI
2170 */
2171 ata_gen_ata_desc_sense(qc);
2172 }
2173
2174 qc->scsidone(qc->scsicmd);
2175 ata_qc_free(qc);
2176}
2177
2178/* is it pointless to prefer PIO for "safety reasons"? */
2179static inline int ata_pio_use_silly(struct ata_port *ap)
2180{
2181 return (ap->flags & ATA_FLAG_PIO_DMA);
2182}
2183
2184static void atapi_request_sense(struct ata_queued_cmd *qc)
2185{
2186 struct ata_port *ap = qc->ap;
2187 struct scsi_cmnd *cmd = qc->scsicmd;
2188
2189 DPRINTK("ATAPI request sense\n");
2190
2191 /* FIXME: is this needed? */
2192 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2193
2194 ap->ops->tf_read(ap, &qc->tf);
2195
2196 /* fill these in, for the case where they are -not- overwritten */
2197 cmd->sense_buffer[0] = 0x70;
2198 cmd->sense_buffer[2] = qc->tf.feature >> 4;
2199
2200 ata_qc_reinit(qc);
2201
2202 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2203 qc->dma_dir = DMA_FROM_DEVICE;
2204
2205 memset(&qc->cdb, 0, qc->dev->cdb_len);
2206 qc->cdb[0] = REQUEST_SENSE;
2207 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2208
2209 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2210 qc->tf.command = ATA_CMD_PACKET;
2211
2212 if (ata_pio_use_silly(ap)) {
2213 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2214 qc->tf.feature |= ATAPI_PKT_DMA;
2215 } else {
2216 qc->tf.protocol = ATA_PROT_ATAPI;
2217 qc->tf.lbam = (8 * 1024) & 0xff;
2218 qc->tf.lbah = (8 * 1024) >> 8;
2219 }
2220 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2221
2222 qc->complete_fn = atapi_sense_complete;
2223
2224 ata_qc_issue(qc);
2225
2226 DPRINTK("EXIT\n");
2227}
2228
2229static void atapi_qc_complete(struct ata_queued_cmd *qc)
2230{
2231 struct scsi_cmnd *cmd = qc->scsicmd;
2232 unsigned int err_mask = qc->err_mask;
2233
2234 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2235
2236 /* handle completion from new EH */
2237 if (unlikely(qc->ap->ops->error_handler &&
2238 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2239
2240 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2241 /* FIXME: not quite right; we don't want the
2242 * translation of taskfile registers into
2243 * sense descriptors, since that's only
2244 * correct for ATA, not ATAPI
2245 */
2246 ata_gen_ata_desc_sense(qc);
2247 }
2248
2249 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2250 qc->scsidone(cmd);
2251 ata_qc_free(qc);
2252 return;
2253 }
2254
2255 /* successful completion or old EH failure path */
2256 if (unlikely(err_mask & AC_ERR_DEV)) {
2257 cmd->result = SAM_STAT_CHECK_CONDITION;
2258 atapi_request_sense(qc);
2259 return;
2260 } else if (unlikely(err_mask)) {
2261 /* FIXME: not quite right; we don't want the
2262 * translation of taskfile registers into
2263 * sense descriptors, since that's only
2264 * correct for ATA, not ATAPI
2265 */
2266 ata_gen_ata_desc_sense(qc);
2267 } else {
2268 u8 *scsicmd = cmd->cmnd;
2269
2270 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2271 u8 *buf = NULL;
2272 unsigned int buflen;
2273
2274 buflen = ata_scsi_rbuf_get(cmd, &buf);
2275
2276 /* ATAPI devices typically report zero for their SCSI version,
2277 * and sometimes deviate from the spec WRT response data
2278 * format. If the SCSI version is reported as zero, as is typical,
2279 * then we make the following fixups: 1) Fake MMC-5 version,
2280 * to indicate to the Linux scsi midlayer this is a modern
2281 * device. 2) Ensure response data format / ATAPI information
2282 * are always correct.
2283 */
2284 if (buf[2] == 0) {
2285 buf[2] = 0x5;
2286 buf[3] = 0x32;
2287 }
2288
2289 ata_scsi_rbuf_put(cmd, buf);
2290 }
2291
2292 cmd->result = SAM_STAT_GOOD;
2293 }
2294
2295 qc->scsidone(cmd);
2296 ata_qc_free(qc);
2297}
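Concretely, for a legacy ATAPI drive that reports version zero, the standard INQUIRY response is patched as follows before it reaches the midlayer (byte meanings per the standard INQUIRY data format):

	/*   buf[2]: 0x00 -> 0x05   claim an MMC-5-era version
	 *   buf[3]: .... -> 0x32   NormACA and HiSup set, response data format 2
	 */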
2298/**
2299 * atapi_xlat - Initialize PACKET taskfile
2300 * @qc: command structure to be initialized
2301 * @scsicmd: SCSI CDB associated with this PACKET command
2302 *
2303 * LOCKING:
2304 * spin_lock_irqsave(host_set lock)
2305 *
2306 * RETURNS:
2307 * Zero on success, non-zero on failure.
2308 */
2309
2310static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2311{
2312 struct scsi_cmnd *cmd = qc->scsicmd;
2313 struct ata_device *dev = qc->dev;
2314 int using_pio = (dev->flags & ATA_DFLAG_PIO);
2315 int nodata = (cmd->sc_data_direction == DMA_NONE);
2316
2317 if (!using_pio)
2318 /* Check whether ATAPI DMA is safe */
2319 if (ata_check_atapi_dma(qc))
2320 using_pio = 1;
2321
2322 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2323
2324 qc->complete_fn = atapi_qc_complete;
2325
2326 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2327 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2328 qc->tf.flags |= ATA_TFLAG_WRITE;
2329 DPRINTK("direction: write\n");
2330 }
2331
2332 qc->tf.command = ATA_CMD_PACKET;
2333
2334 /* no data, or PIO data xfer */
2335 if (using_pio || nodata) {
2336 if (nodata)
2337 qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
2338 else
2339 qc->tf.protocol = ATA_PROT_ATAPI;
2340 qc->tf.lbam = (8 * 1024) & 0xff;
2341 qc->tf.lbah = (8 * 1024) >> 8;
2342 }
2343
2344 /* DMA data xfer */
2345 else {
2346 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2347 qc->tf.feature |= ATAPI_PKT_DMA;
2348
2349 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2350 /* some SATA bridges need us to indicate data xfer direction */
2351 qc->tf.feature |= ATAPI_DMADIR;
2352 }
2353
2354 qc->nbytes = cmd->request_bufflen;
2355
2356 return 0;
2357}
2358
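The 8 KiB PIO byte-count limit programmed above works out as follows (worked arithmetic, nothing device-specific):

	/*   8 * 1024 = 0x2000
	 *   lbam = 0x2000 & 0xff = 0x00   byte count, low  (ATAPI byte count limit)
	 *   lbah = 0x2000 >> 8   = 0x20   byte count, high
	 * so the device may return at most 8 KiB per DRQ data phase.
	 */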
2359static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
2360{
2361 if (likely(id < ATA_MAX_DEVICES))
2362 return &ap->device[id];
2363 return NULL;
2364}
2365
2366static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2367 const struct scsi_device *scsidev)
2368{
2369 /* skip commands not addressed to targets we simulate */
2370 if (unlikely(scsidev->channel || scsidev->lun))
2371 return NULL;
2372
2373 return ata_find_dev(ap, scsidev->id);
2374}
2375
2376/**
2377 * ata_scsi_dev_enabled - determine if device is enabled
2378 * @dev: ATA device
2379 *
2380 * Determine if commands should be sent to the specified device.
2381 *
2382 * LOCKING:
2383 * spin_lock_irqsave(host_set lock)
2384 *
2385 * RETURNS:
2386 * 0 if commands are not allowed / 1 if commands are allowed
2387 */
2388
2389static int ata_scsi_dev_enabled(struct ata_device *dev)
2390{
2391 if (unlikely(!ata_dev_enabled(dev)))
2392 return 0;
2393
2394 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
2395 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2396 ata_dev_printk(dev, KERN_WARNING,
2397 "WARNING: ATAPI is %s, device ignored.\n",
2398 atapi_enabled ? "not supported with this driver" : "disabled");
2399 return 0;
2400 }
2401 }
2402
2403 return 1;
2404}
2405
2406/**
2407 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2408 * @ap: ATA port to which the device is attached
2409 * @scsidev: SCSI device from which we derive the ATA device
2410 *
2411 * Given various information provided in struct scsi_cmnd,
2412 * map that onto an ATA bus, and using that mapping
2413 * determine which ata_device is associated with the
2414 * SCSI command to be sent.
2415 *
2416 * LOCKING:
2417 * spin_lock_irqsave(host_set lock)
2418 *
2419 * RETURNS:
2420 * Associated ATA device, or %NULL if not found.
2421 */
2422static struct ata_device *
2423ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2424{
2425 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2426
2427 if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
2428 return NULL;
2429
2430 return dev;
2431}
2432
2433/**
2434 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2435 * @byte1: Byte 1 from pass-thru CDB.
2436 *
2437 * RETURNS:
2438 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2439 */
2440static u8
2441ata_scsi_map_proto(u8 byte1)
2442{
2443 switch((byte1 & 0x1e) >> 1) {
2444 case 3: /* Non-data */
2445 return ATA_PROT_NODATA;
2446
2447 case 6: /* DMA */
2448 return ATA_PROT_DMA;
2449
2450 case 4: /* PIO Data-in */
2451 case 5: /* PIO Data-out */
2452 return ATA_PROT_PIO;
2453
2454 case 10: /* Device Reset */
2455 case 0: /* Hard Reset */
2456 case 1: /* SRST */
2457 case 2: /* Bus Idle */
2458 case 7: /* Packet */
2459 case 8: /* DMA Queued */
2460 case 9: /* Device Diagnostic */
2461 case 11: /* UDMA Data-in */
2462 case 12: /* UDMA Data-Out */
2463 case 13: /* FPDMA */
2464 default: /* Reserved */
2465 break;
2466 }
2467
2468 return ATA_PROT_UNKNOWN;
2469}
2470
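A quick self-check of the mapping, written against the switch above; `map_proto_demo` is illustrative only and not part of the driver:

	static void map_proto_demo(void)
	{
		/* the protocol field lives in bits 1-4 of CDB byte 1, hence << 1 */
		BUG_ON(ata_scsi_map_proto(3 << 1) != ATA_PROT_NODATA);
		BUG_ON(ata_scsi_map_proto(4 << 1) != ATA_PROT_PIO);	/* PIO Data-in */
		BUG_ON(ata_scsi_map_proto(6 << 1) != ATA_PROT_DMA);
		BUG_ON(ata_scsi_map_proto(13 << 1) != ATA_PROT_UNKNOWN); /* FPDMA: unsupported */
	}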
2471/**
2472 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2473 * @qc: command structure to be initialized
2474 * @scsicmd: SCSI command to convert
2475 *
2476 * Handles either 12 or 16-byte versions of the CDB.
2477 *
2478 * RETURNS:
2479 * Zero on success, non-zero on failure.
2480 */
2481static unsigned int
2482ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2483{
2484 struct ata_taskfile *tf = &(qc->tf);
2485 struct scsi_cmnd *cmd = qc->scsicmd;
2486 struct ata_device *dev = qc->dev;
2487
2488 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2489 goto invalid_fld;
2490
2491 /* We may not issue DMA commands if no DMA mode is set */
2492 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2493 goto invalid_fld;
2494
2495 if (scsicmd[1] & 0xe0)
2496 /* PIO multi not supported yet */
2497 goto invalid_fld;
2498
2499 /*
2500 * 12 and 16 byte CDBs use different offsets to
2501 * provide the various register values.
2502 */
2503 if (scsicmd[0] == ATA_16) {
2504 /*
2505 * 16-byte CDB - may contain extended commands.
2506 *
2507 * If that is the case, copy the upper byte register values.
2508 */
2509 if (scsicmd[1] & 0x01) {
2510 tf->hob_feature = scsicmd[3];
2511 tf->hob_nsect = scsicmd[5];
2512 tf->hob_lbal = scsicmd[7];
2513 tf->hob_lbam = scsicmd[9];
2514 tf->hob_lbah = scsicmd[11];
2515 tf->flags |= ATA_TFLAG_LBA48;
2516 } else
2517 tf->flags &= ~ATA_TFLAG_LBA48;
2518
2519 /*
2520 * Always copy low byte, device and command registers.
2521 */
2522 tf->feature = scsicmd[4];
2523 tf->nsect = scsicmd[6];
2524 tf->lbal = scsicmd[8];
2525 tf->lbam = scsicmd[10];
2526 tf->lbah = scsicmd[12];
2527 tf->device = scsicmd[13];
2528 tf->command = scsicmd[14];
2529 } else {
2530 /*
2531 * 12-byte CDB - incapable of extended commands.
2532 */
2533 tf->flags &= ~ATA_TFLAG_LBA48;
2534
2535 tf->feature = scsicmd[3];
2536 tf->nsect = scsicmd[4];
2537 tf->lbal = scsicmd[5];
2538 tf->lbam = scsicmd[6];
2539 tf->lbah = scsicmd[7];
2540 tf->device = scsicmd[8];
2541 tf->command = scsicmd[9];
2542 }
2543 /*
2544 * If slave is possible, enforce correct master/slave bit
2545 */
2546 if (qc->ap->flags & ATA_FLAG_SLAVE_POSS)
2547 tf->device = qc->dev->devno ?
2548 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2549
2550 /*
2551 * Filter SET_FEATURES - XFER MODE command -- otherwise,
2552 * SET_FEATURES - XFER MODE must be preceded/succeeded
2553 * by an update to hardware-specific registers for each
2554 * controller (i.e. the reason for ->set_piomode(),
2555 * ->set_dmamode(), and ->post_set_mode() hooks).
2556 */
2557 if ((tf->command == ATA_CMD_SET_FEATURES)
2558 && (tf->feature == SETFEATURES_XFER))
2559 goto invalid_fld;
2560
2561 /*
2562 * Set flags so that all registers will be written,
2563 * and pass on write indication (used for PIO/DMA
2564 * setup.)
2565 */
2566 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
2567
2568 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2569 tf->flags |= ATA_TFLAG_WRITE;
2570
2571 /*
2572 * Set transfer length.
2573 *
2574 * TODO: find out if we need to do more here to
2575 * cover scatter/gather case.
2576 */
2577 qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;
2578
2579 /* request result TF */
2580 qc->flags |= ATA_QCFLAG_RESULT_TF;
2581
2582 return 0;
2583
2584 invalid_fld:
2585 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
2586 /* "Invalid field in cdb" */
2587 return 1;
2588}
2589
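As an illustration of the 16-byte layout parsed above, a hypothetical ATA_16 CDB for IDENTIFY DEVICE (PIO Data-in) would carry the register bytes exactly where this function reads them:

	/*   cdb[0]  = 0x85   ATA_16 opcode
	 *   cdb[1]  = 0x08   protocol 4 (PIO Data-in) << 1, extend bit clear
	 *   cdb[6]  = 0x01   nsect: one 512-byte sector of IDENTIFY data
	 *   cdb[14] = 0xec   command: IDENTIFY DEVICE
	 * remaining register bytes zero; with extend clear, the hob bytes
	 * (3, 5, 7, 9, 11) are ignored and ATA_TFLAG_LBA48 is cleared.
	 */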
2590/**
2591 * ata_get_xlat_func - check if SCSI to ATA translation is possible
2592 * @dev: ATA device
2593 * @cmd: SCSI command opcode to consider
2594 *
2595 * Look up the SCSI command given, and determine whether the
2596 * SCSI command is to be translated or simulated.
2597 *
2598 * RETURNS:
2599 * Pointer to translation function if possible, %NULL if not.
2600 */
2601
2602static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
2603{
2604 switch (cmd) {
2605 case READ_6:
2606 case READ_10:
2607 case READ_16:
2608
2609 case WRITE_6:
2610 case WRITE_10:
2611 case WRITE_16:
2612 return ata_scsi_rw_xlat;
2613
2614 case SYNCHRONIZE_CACHE:
2615 if (ata_try_flush_cache(dev))
2616 return ata_scsi_flush_xlat;
2617 break;
2618
2619 case VERIFY:
2620 case VERIFY_16:
2621 return ata_scsi_verify_xlat;
2622
2623 case ATA_12:
2624 case ATA_16:
2625 return ata_scsi_pass_thru;
2626
2627 case START_STOP:
2628 return ata_scsi_start_stop_xlat;
2629 }
2630
2631 return NULL;
2632}
2633
2634/**
2635 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
2636 * @ap: ATA port to which the command was being sent
2637 * @cmd: SCSI command to dump
2638 *
2639 * Prints the contents of a SCSI command via printk().
2640 */
2641
2642static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2643 struct scsi_cmnd *cmd)
2644{
2645#ifdef ATA_DEBUG
2646 struct scsi_device *scsidev = cmd->device;
2647 u8 *scsicmd = cmd->cmnd;
2648
2649 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
2650 ap->id,
2651 scsidev->channel, scsidev->id, scsidev->lun,
2652 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
2653 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
2654 scsicmd[8]);
2655#endif
2656}
2657
2658static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2659 void (*done)(struct scsi_cmnd *),
2660 struct ata_device *dev)
2661{
2662 int rc = 0;
2663
2664 if (dev->class == ATA_DEV_ATA) {
2665 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2666 cmd->cmnd[0]);
2667
2668 if (xlat_func)
2669 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2670 else
2671 ata_scsi_simulate(dev, cmd, done);
2672 } else
2673 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2674
2675 return rc;
2676}
2677
2678/**
2679 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
2680 * @cmd: SCSI command to be sent
2681 * @done: Completion function, called when command is complete
2682 *
2683 * In some cases, this function translates SCSI commands into
2684 * ATA taskfiles, and queues the taskfiles to be sent to
2685 * hardware. In other cases, this function simulates a
2686 * SCSI device by evaluating and responding to certain
2687 * SCSI commands. This creates the overall effect of
2688 * ATA and ATAPI devices appearing as SCSI devices.
2689 *
2690 * LOCKING:
2691 * Releases scsi-layer-held lock, and obtains host_set lock.
2692 *
2693 * RETURNS:
2694 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2695 * 0 otherwise.
2696 */
2697int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2698{
2699 struct ata_port *ap;
2700 struct ata_device *dev;
2701 struct scsi_device *scsidev = cmd->device;
2702 struct Scsi_Host *shost = scsidev->host;
2703 int rc = 0;
2704
2705 ap = ata_shost_to_port(shost);
2706
2707 spin_unlock(shost->host_lock);
2708 spin_lock(ap->lock);
2709
2710 ata_scsi_dump_cdb(ap, cmd);
2711
2712 dev = ata_scsi_find_dev(ap, scsidev);
2713 if (likely(dev))
2714 rc = __ata_scsi_queuecmd(cmd, done, dev);
2715 else {
2716 cmd->result = (DID_BAD_TARGET << 16);
2717 done(cmd);
2718 }
2719
2720 spin_unlock(ap->lock);
2721 spin_lock(shost->host_lock);
2722 return rc;
2723}
2724
2725/**
2726 * ata_scsi_simulate - simulate SCSI command on ATA device
2727 * @dev: the target device
2728 * @cmd: SCSI command being sent to device.
2729 * @done: SCSI command completion function.
2730 *
2731 * Interprets and directly executes a select list of SCSI commands
2732 * that can be handled internally.
2733 *
2734 * LOCKING:
2735 * spin_lock_irqsave(host_set lock)
2736 */
2737
2738void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2739 void (*done)(struct scsi_cmnd *))
2740{
2741 struct ata_scsi_args args;
2742 const u8 *scsicmd = cmd->cmnd;
2743
2744 args.dev = dev;
2745 args.id = dev->id;
2746 args.cmd = cmd;
2747 args.done = done;
2748
2749 switch(scsicmd[0]) {
2750 /* no-op's, complete with success */
2751 case SYNCHRONIZE_CACHE:
2752 case REZERO_UNIT:
2753 case SEEK_6:
2754 case SEEK_10:
2755 case TEST_UNIT_READY:
2756 case FORMAT_UNIT: /* FIXME: correct? */
2757 case SEND_DIAGNOSTIC: /* FIXME: correct? */
2758 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
2759 break;
2760
2761 case INQUIRY:
2762 if (scsicmd[1] & 2) /* is CmdDt set? */
2763 ata_scsi_invalid_field(cmd, done);
2764 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
2765 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
2766 else if (scsicmd[2] == 0x00)
2767 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
2768 else if (scsicmd[2] == 0x80)
2769 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
2770 else if (scsicmd[2] == 0x83)
2771 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
2772 else
2773 ata_scsi_invalid_field(cmd, done);
2774 break;
2775
2776 case MODE_SENSE:
2777 case MODE_SENSE_10:
2778 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
2779 break;
2780
2781 case MODE_SELECT: /* unconditionally return */
2782 case MODE_SELECT_10: /* bad-field-in-cdb */
2783 ata_scsi_invalid_field(cmd, done);
2784 break;
2785
2786 case READ_CAPACITY:
2787 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2788 break;
2789
2790 case SERVICE_ACTION_IN:
2791 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
2792 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2793 else
2794 ata_scsi_invalid_field(cmd, done);
2795 break;
2796
2797 case REPORT_LUNS:
2798 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
2799 break;
2800
2801 /* mandatory commands we haven't implemented yet */
2802 case REQUEST_SENSE:
2803
2804 /* all other commands */
2805 default:
2806 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
2807 /* "Invalid command operation code" */
2808 done(cmd);
2809 break;
2810 }
2811}
2812
2813void ata_scsi_scan_host(struct ata_port *ap)
2814{
2815 unsigned int i;
2816
2817 if (ap->flags & ATA_FLAG_DISABLED)
2818 return;
2819
2820 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2821 struct ata_device *dev = &ap->device[i];
2822 struct scsi_device *sdev;
2823
2824 if (!ata_dev_enabled(dev) || dev->sdev)
2825 continue;
2826
2827 sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
2828 if (!IS_ERR(sdev)) {
2829 dev->sdev = sdev;
2830 scsi_device_put(sdev);
2831 }
2832 }
2833}
2834
2835/**
2836 * ata_scsi_offline_dev - offline attached SCSI device
2837 * @dev: ATA device to offline attached SCSI device for
2838 *
2839 * This function is called from ata_eh_hotplug() and responsible
2840 * for taking the SCSI device attached to @dev offline. This
2841 * function is called with host_set lock which protects dev->sdev
2842 * against clearing.
2843 *
2844 * LOCKING:
2845 * spin_lock_irqsave(host_set lock)
2846 *
2847 * RETURNS:
2848 * 1 if attached SCSI device exists, 0 otherwise.
2849 */
2850int ata_scsi_offline_dev(struct ata_device *dev)
2851{
2852 if (dev->sdev) {
2853 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2854 return 1;
2855 }
2856 return 0;
2857}
2858
2859/**
2860 * ata_scsi_remove_dev - remove attached SCSI device
2861 * @dev: ATA device to remove attached SCSI device for
2862 *
2863 * This function is called from ata_eh_scsi_hotplug() and
2864 * responsible for removing the SCSI device attached to @dev.
2865 *
2866 * LOCKING:
2867 * Kernel thread context (may sleep).
2868 */
2869static void ata_scsi_remove_dev(struct ata_device *dev)
2870{
2871 struct ata_port *ap = dev->ap;
2872 struct scsi_device *sdev;
2873 unsigned long flags;
2874
2875 /* Alas, we need to grab scan_mutex to ensure SCSI device
2876 * state doesn't change underneath us and thus
2877 * scsi_device_get() always succeeds. The mutex locking can
2878 * be removed if there is __scsi_device_get() interface which
2879 * increments reference counts regardless of device state.
2880 */
2881 mutex_lock(&ap->host->scan_mutex);
2882 spin_lock_irqsave(ap->lock, flags);
2883
2884 /* clearing dev->sdev is protected by host_set lock */
2885 sdev = dev->sdev;
2886 dev->sdev = NULL;
2887
2888 if (sdev) {
2889 /* If user initiated unplug races with us, sdev can go
2890 * away underneath us after the host_set lock and
2891 * scan_mutex are released. Hold onto it.
2892 */
2893 if (scsi_device_get(sdev) == 0) {
2894 /* The following ensures the attached sdev is
2895 * offline on return from ata_scsi_offline_dev()
2896 * regardless of whether it wins or loses the race
2897 * against this function.
2898 */
2899 scsi_device_set_state(sdev, SDEV_OFFLINE);
2900 } else {
2901 WARN_ON(1);
2902 sdev = NULL;
2903 }
2904 }
2905
2906 spin_unlock_irqrestore(ap->lock, flags);
2907 mutex_unlock(&ap->host->scan_mutex);
2908
2909 if (sdev) {
2910 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
2911 sdev->sdev_gendev.bus_id);
2912
2913 scsi_remove_device(sdev);
2914 scsi_device_put(sdev);
2915 }
2916}
2917
2918/**
2919 * ata_scsi_hotplug - SCSI part of hotplug
2920 * @data: Pointer to ATA port to perform SCSI hotplug on
2921 *
2922 * Perform SCSI part of hotplug. It's executed from a separate
2923 * workqueue after EH completes. This is necessary because SCSI
2924 * hot plugging requires a working EH, and hot unplugging is
2925 * synchronized with hot plugging via a mutex.
2926 *
2927 * LOCKING:
2928 * Kernel thread context (may sleep).
2929 */
2930void ata_scsi_hotplug(void *data)
2931{
2932 struct ata_port *ap = data;
2933 int i;
2934
2935 if (ap->flags & ATA_FLAG_UNLOADING) {
2936 DPRINTK("ENTER/EXIT - unloading\n");
2937 return;
2938 }
2939
2940 DPRINTK("ENTER\n");
2941
2942 /* unplug detached devices */
2943 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2944 struct ata_device *dev = &ap->device[i];
2945 unsigned long flags;
2946
2947 if (!(dev->flags & ATA_DFLAG_DETACHED))
2948 continue;
2949
2950 spin_lock_irqsave(ap->lock, flags);
2951 dev->flags &= ~ATA_DFLAG_DETACHED;
2952 spin_unlock_irqrestore(ap->lock, flags);
2953
2954 ata_scsi_remove_dev(dev);
2955 }
2956
2957 /* scan for new ones */
2958 ata_scsi_scan_host(ap);
2959
2960 /* If we scanned while EH was in progress, scan would have
2961 * failed silently. Requeue if there are enabled but
2962 * unattached devices.
2963 */
2964 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2965 struct ata_device *dev = &ap->device[i];
2966 if (ata_dev_enabled(dev) && !dev->sdev) {
2967 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
2968 break;
2969 }
2970 }
2971
2972 DPRINTK("EXIT\n");
2973}
2974
2975/**
2976 * ata_scsi_user_scan - indication for user-initiated bus scan
2977 * @shost: SCSI host to scan
2978 * @channel: Channel to scan
2979 * @id: ID to scan
2980 * @lun: LUN to scan
2981 *
2982 * This function is called when user explicitly requests bus
2983 * scan. Set probe pending flag and invoke EH.
2984 *
2985 * LOCKING:
2986 * SCSI layer (we don't care)
2987 *
2988 * RETURNS:
2989 * Zero.
2990 */
2991static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
2992 unsigned int id, unsigned int lun)
2993{
2994 struct ata_port *ap = ata_shost_to_port(shost);
2995 unsigned long flags;
2996 int rc = 0;
2997
2998 if (!ap->ops->error_handler)
2999 return -EOPNOTSUPP;
3000
3001 if ((channel != SCAN_WILD_CARD && channel != 0) ||
3002 (lun != SCAN_WILD_CARD && lun != 0))
3003 return -EINVAL;
3004
3005 spin_lock_irqsave(ap->lock, flags);
3006
3007 if (id == SCAN_WILD_CARD) {
3008 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
3009 ap->eh_info.action |= ATA_EH_SOFTRESET;
3010 } else {
3011 struct ata_device *dev = ata_find_dev(ap, id);
3012
3013 if (dev) {
3014 ap->eh_info.probe_mask |= 1 << dev->devno;
3015 ap->eh_info.action |= ATA_EH_SOFTRESET;
3016 } else
3017 rc = -EINVAL;
3018 }
3019
3020 if (rc == 0)
3021 ata_port_schedule_eh(ap);
3022
3023 spin_unlock_irqrestore(ap->lock, flags);
3024
3025 return rc;
3026}
3027
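User space reaches this hook through the SCSI host's sysfs `scan` attribute; a minimal sketch follows (the host number and target ID are made up, error handling is elided):

	#include <stdio.h>

	int main(void)
	{
		/* format is "channel id lun"; '-' is the SCAN_WILD_CARD spelling */
		FILE *f = fopen("/sys/class/scsi_host/host0/scan", "w");

		if (!f)
			return 1;
		fprintf(f, "0 1 0\n");	/* probe device 1 on this port */
		return fclose(f) ? 1 : 0;
	}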
3028/**
3029 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3030 * @data: Pointer to ATA port to perform scsi_rescan_device()
3031 *
3032 * After ATA pass thru (SAT) commands are executed successfully,
3033 * libata needs to propagate the changes to the SCSI layer. This
3034 * function must be executed from ata_aux_wq such that sdev
3035 * attach/detach don't race with rescan.
3036 *
3037 * LOCKING:
3038 * Kernel thread context (may sleep).
3039 */
3040void ata_scsi_dev_rescan(void *data)
3041{
3042 struct ata_port *ap = data;
3043 struct ata_device *dev;
3044 unsigned int i;
3045
3046 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3047 dev = &ap->device[i];
3048
3049 if (ata_dev_enabled(dev) && dev->sdev)
3050 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3051 }
3052}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
deleted file mode 100644
index bdd488897096..000000000000
--- a/drivers/scsi/libata.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * libata.h - helper library for ATA
3 *
4 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 * Copyright 2003-2004 Jeff Garzik
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 */
27
28#ifndef __LIBATA_H__
29#define __LIBATA_H__
30
31#define DRV_NAME "libata"
32#define DRV_VERSION "1.30" /* must be exactly four chars */
33
34struct ata_scsi_args {
35 struct ata_device *dev;
36 u16 *id;
37 struct scsi_cmnd *cmd;
38 void (*done)(struct scsi_cmnd *);
39};
40
41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled;
44extern int atapi_dmadir;
45extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
54 int post_reset, u16 *id);
55extern int ata_dev_configure(struct ata_device *dev, int print_info);
56extern int sata_down_spd_limit(struct ata_port *ap);
57extern int sata_set_spd_needed(struct ata_port *ap);
58extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
59extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
60extern void ata_qc_free(struct ata_queued_cmd *qc);
61extern void ata_qc_issue(struct ata_queued_cmd *qc);
62extern void __ata_qc_complete(struct ata_queued_cmd *qc);
63extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
64extern void ata_dev_select(struct ata_port *ap, unsigned int device,
65 unsigned int wait, unsigned int can_sleep);
66extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
67extern void ata_dev_init(struct ata_device *dev);
68extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
69extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
70
71
72/* libata-scsi.c */
73extern struct scsi_transport_template ata_scsi_transport_template;
74
75extern void ata_scsi_scan_host(struct ata_port *ap);
76extern int ata_scsi_offline_dev(struct ata_device *dev);
77extern void ata_scsi_hotplug(void *data);
78extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
79 unsigned int buflen);
80
81extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
82 unsigned int buflen);
83
84extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
85 unsigned int buflen);
86extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
87 unsigned int buflen);
88extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
89 unsigned int buflen);
90extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
91 unsigned int buflen);
92extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
93 unsigned int buflen);
94extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
95 unsigned int buflen);
96extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
97 unsigned int buflen);
98extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
99 void (*done)(struct scsi_cmnd *),
100 u8 asc, u8 ascq);
101extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
102 u8 sk, u8 asc, u8 ascq);
103extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
104 unsigned int (*actor) (struct ata_scsi_args *args,
105 u8 *rbuf, unsigned int buflen));
106extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
107extern void ata_scsi_dev_rescan(void *data);
108
109/* libata-eh.c */
110extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
111extern void ata_scsi_error(struct Scsi_Host *host);
112extern void ata_port_wait_eh(struct ata_port *ap);
113extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
114
115#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 2673a11a9495..c542d0e95e68 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -68,8 +68,7 @@ iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
 
 void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
-				   struct iscsi_data *hdr,
-				   int transport_data_cnt)
+				   struct iscsi_data *hdr)
 {
 	struct iscsi_conn *conn = ctask->conn;
 
@@ -82,14 +81,12 @@ void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
 
 	hdr->itt = ctask->hdr->itt;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
-
-	hdr->offset = cpu_to_be32(ctask->total_length -
-				  transport_data_cnt -
-				  ctask->unsol_count);
+	hdr->offset = cpu_to_be32(ctask->unsol_offset);
 
 	if (ctask->unsol_count > conn->max_xmit_dlength) {
 		hton24(hdr->dlength, conn->max_xmit_dlength);
 		ctask->data_count = conn->max_xmit_dlength;
+		ctask->unsol_offset += ctask->data_count;
 		hdr->flags = 0;
 	} else {
 		hton24(hdr->dlength, ctask->unsol_count);
@@ -125,6 +122,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
 	memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
 
+	ctask->data_count = 0;
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
 		/*
@@ -143,6 +141,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	 */
 	ctask->imm_count = 0;
 	ctask->unsol_count = 0;
+	ctask->unsol_offset = 0;
 	ctask->unsol_datasn = 0;
 
 	if (session->imm_data_en) {
@@ -156,9 +155,12 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	} else
 		zero_data(ctask->hdr->dlength);
 
-	if (!session->initial_r2t_en)
+	if (!session->initial_r2t_en) {
 		ctask->unsol_count = min(session->first_burst,
 			ctask->total_length) - ctask->imm_count;
+		ctask->unsol_offset = ctask->imm_count;
+	}
+
 	if (!ctask->unsol_count)
 		/* No unsolicit Data-Out's */
 		ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -177,24 +179,51 @@ EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
 
 /**
  * iscsi_complete_command - return command back to scsi-ml
- * @session: iscsi session
  * @ctask: iscsi cmd task
  *
  * Must be called with session lock.
  * This function returns the scsi command to scsi-ml and returns
  * the cmd task to the pool of available cmd tasks.
  */
-static void iscsi_complete_command(struct iscsi_session *session,
-				   struct iscsi_cmd_task *ctask)
+static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
 {
+	struct iscsi_session *session = ctask->conn->session;
 	struct scsi_cmnd *sc = ctask->sc;
 
+	ctask->state = ISCSI_TASK_COMPLETED;
 	ctask->sc = NULL;
+	/* SCSI eh reuses commands to verify us */
+	sc->SCp.ptr = NULL;
 	list_del_init(&ctask->running);
 	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
 	sc->scsi_done(sc);
 }
 
+static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+{
+	atomic_inc(&ctask->refcount);
+}
+
+static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+{
+	spin_lock_bh(&ctask->conn->session->lock);
+	__iscsi_get_ctask(ctask);
+	spin_unlock_bh(&ctask->conn->session->lock);
+}
+
+static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+{
+	if (atomic_dec_and_test(&ctask->refcount))
+		iscsi_complete_command(ctask);
+}
+
+static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+{
+	spin_lock_bh(&ctask->conn->session->lock);
+	__iscsi_put_ctask(ctask);
+	spin_unlock_bh(&ctask->conn->session->lock);
+}
+
 /**
  * iscsi_cmd_rsp - SCSI Command Response processing
  * @conn: iscsi connection
@@ -271,10 +300,53 @@ out:
 		   (long)sc, sc->result, ctask->itt);
 	conn->scsirsp_pdus_cnt++;
 
-	iscsi_complete_command(conn->session, ctask);
+	__iscsi_put_ctask(ctask);
 	return rc;
 }
 
+static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+
+	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+	conn->tmfrsp_pdus_cnt++;
+
+	if (conn->tmabort_state != TMABORT_INITIAL)
+		return;
+
+	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+		conn->tmabort_state = TMABORT_SUCCESS;
+	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+		conn->tmabort_state = TMABORT_NOT_FOUND;
+	else
+		conn->tmabort_state = TMABORT_FAILED;
+	wake_up(&conn->ehwait);
+}
+
+static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+			       char *data, int datalen)
+{
+	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
+	struct iscsi_hdr rejected_pdu;
+	uint32_t itt;
+
+	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
+
+	if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
+		if (ntoh24(reject->dlength) > datalen)
+			return ISCSI_ERR_PROTO;
+
+		if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
+			memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+			itt = rejected_pdu.itt & ISCSI_ITT_MASK;
+			printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
+				"due to DataDigest error.\n", itt,
+				rejected_pdu.opcode);
		}
+	}
+	return 0;
+}
+
 /**
  * __iscsi_complete_pdu - complete pdu
  * @conn: iscsi conn
@@ -316,7 +388,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
 			if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
 				conn->scsirsp_pdus_cnt++;
-				iscsi_complete_command(session, ctask);
+				__iscsi_put_ctask(ctask);
 			}
 			break;
 		case ISCSI_OP_R2T:
@@ -340,6 +412,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 
 		switch(opcode) {
 		case ISCSI_OP_LOGOUT_RSP:
+			if (datalen) {
+				rc = ISCSI_ERR_PROTO;
+				break;
+			}
 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
 			/* fall through */
 		case ISCSI_OP_LOGIN_RSP:
@@ -348,7 +424,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			 * login related PDU's exp_statsn is handled in
 			 * userspace
 			 */
-			rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+				rc = ISCSI_ERR_CONN_FAILED;
 			list_del(&mtask->running);
 			if (conn->login_mtask != mtask)
 				__kfifo_put(session->mgmtpool.queue,
@@ -360,25 +437,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 				break;
 			}
 
-			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-			conn->tmfrsp_pdus_cnt++;
-			if (conn->tmabort_state == TMABORT_INITIAL) {
-				conn->tmabort_state =
-					((struct iscsi_tm_rsp *)hdr)->
-					response == ISCSI_TMF_RSP_COMPLETE ?
-						TMABORT_SUCCESS:TMABORT_FAILED;
-				/* unblock eh_abort() */
-				wake_up(&conn->ehwait);
-			}
+			iscsi_tmf_rsp(conn, hdr);
 			break;
 		case ISCSI_OP_NOOP_IN:
-			if (hdr->ttt != ISCSI_RESERVED_TAG) {
+			if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) {
 				rc = ISCSI_ERR_PROTO;
 				break;
 			}
 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
 
-			rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+				rc = ISCSI_ERR_CONN_FAILED;
 			list_del(&mtask->running);
 			if (conn->login_mtask != mtask)
 				__kfifo_put(session->mgmtpool.queue,
@@ -389,19 +458,27 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			break;
 		}
 	} else if (itt == ISCSI_RESERVED_TAG) {
+		rc = iscsi_check_assign_cmdsn(session,
+					      (struct iscsi_nopin*)hdr);
+		if (rc)
+			goto done;
+
 		switch(opcode) {
 		case ISCSI_OP_NOOP_IN:
-			if (!datalen) {
-				rc = iscsi_check_assign_cmdsn(session,
-						(struct iscsi_nopin*)hdr);
-				if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
-					rc = iscsi_recv_pdu(conn->cls_conn,
-							    hdr, NULL, 0);
-			} else
+			if (datalen) {
 				rc = ISCSI_ERR_PROTO;
+				break;
+			}
+
+			if (hdr->ttt == ISCSI_RESERVED_TAG)
+				break;
+
+			if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
+				rc = ISCSI_ERR_CONN_FAILED;
 			break;
 		case ISCSI_OP_REJECT:
-			/* we need sth like iscsi_reject_rsp()*/
+			rc = iscsi_handle_reject(conn, hdr, data, datalen);
+			break;
 		case ISCSI_OP_ASYNC_EVENT:
 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
 			/* we need sth like iscsi_async_event_rsp() */
@@ -537,7 +614,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	BUG_ON(conn->ctask && conn->mtask);
 
 	if (conn->ctask) {
+		iscsi_get_ctask(conn->ctask);
 		rc = tt->xmit_cmd_task(conn, conn->ctask);
+		iscsi_put_ctask(conn->ctask);
 		if (rc)
 			goto again;
 		/* done with this in-progress ctask */
@@ -568,20 +647,31 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	}
 
 	/* process command queue */
-	while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
-			   sizeof(void*))) {
+	spin_lock_bh(&conn->session->lock);
+	while (!list_empty(&conn->xmitqueue)) {
 		/*
 		 * iscsi tcp may readd the task to the xmitqueue to send
 		 * write data
 		 */
-		spin_lock_bh(&conn->session->lock);
-		if (list_empty(&conn->ctask->running))
-			list_add_tail(&conn->ctask->running, &conn->run_list);
+		conn->ctask = list_entry(conn->xmitqueue.next,
+					 struct iscsi_cmd_task, running);
+		conn->ctask->state = ISCSI_TASK_RUNNING;
+		list_move_tail(conn->xmitqueue.next, &conn->run_list);
+		__iscsi_get_ctask(conn->ctask);
 		spin_unlock_bh(&conn->session->lock);
+
 		rc = tt->xmit_cmd_task(conn, conn->ctask);
 		if (rc)
 			goto again;
+
+		spin_lock_bh(&conn->session->lock);
+		__iscsi_put_ctask(conn->ctask);
+		if (rc) {
+			spin_unlock_bh(&conn->session->lock);
+			goto again;
+		}
 	}
+	spin_unlock_bh(&conn->session->lock);
 	/* done with this ctask */
 	conn->ctask = NULL;
 
@@ -629,6 +719,7 @@ enum {
 	FAILURE_SESSION_FAILED,
 	FAILURE_SESSION_FREED,
 	FAILURE_WINDOW_CLOSED,
+	FAILURE_OOM,
 	FAILURE_SESSION_TERMINATE,
 	FAILURE_SESSION_IN_RECOVERY,
 	FAILURE_SESSION_RECOVERY_TIMEOUT,
@@ -644,6 +735,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
 	sc->scsi_done = done;
 	sc->result = 0;
+	sc->SCp.ptr = NULL;
 
 	host = sc->device->host;
 	session = iscsi_hostdata(host->hostdata);
@@ -687,10 +779,16 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
 	conn = session->leadconn;
 
-	__kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
+	if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+			 sizeof(void*))) {
+		reason = FAILURE_OOM;
+		goto reject;
+	}
 	sc->SCp.phase = session->age;
 	sc->SCp.ptr = (char *)ctask;
 
+	atomic_set(&ctask->refcount, 1);
+	ctask->state = ISCSI_TASK_PENDING;
 	ctask->mtask = NULL;
 	ctask->conn = conn;
 	ctask->sc = sc;
@@ -700,11 +798,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
 	session->tt->init_cmd_task(ctask);
 
-	__kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
+	list_add_tail(&ctask->running, &conn->xmitqueue);
 	debug_scsi(
-	   "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
+	   "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
+	   "win %d]\n",
 	    sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
-	    conn->id, (long)sc, ctask->itt, sc->request_bufflen,
+	    conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
 	    session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
 	spin_unlock(&session->lock);
 
@@ -977,31 +1076,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
 /*
  * xmit mutex and session lock must be held
  */
-#define iscsi_remove_task(tasktype)					\
-static struct iscsi_##tasktype *					\
-iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt)		\
-{									\
-	int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);		\
-	struct iscsi_##tasktype *task;					\
-									\
-	debug_scsi("searching %d tasks\n", nr_tasks);			\
-									\
-	for (i = 0; i < nr_tasks; i++) {				\
-		__kfifo_get(fifo, (void*)&task, sizeof(void*));		\
-		debug_scsi("check task %u\n", task->itt);		\
-									\
-		if (task->itt == itt) {					\
-			debug_scsi("matched task\n");			\
-			return task;					\
-		}							\
-									\
-		__kfifo_put(fifo, (void*)&task, sizeof(void*));		\
-	}								\
-	return NULL;							\
-}
+static struct iscsi_mgmt_task *
+iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+{
+	int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
+	struct iscsi_mgmt_task *task;
+
+	debug_scsi("searching %d tasks\n", nr_tasks);
+
+	for (i = 0; i < nr_tasks; i++) {
+		__kfifo_get(fifo, (void*)&task, sizeof(void*));
+		debug_scsi("check task %u\n", task->itt);
+
+		if (task->itt == itt) {
+			debug_scsi("matched task\n");
+			return task;
+		}
 
-iscsi_remove_task(mgmt_task);
-iscsi_remove_task(cmd_task);
+		__kfifo_put(fifo, (void*)&task, sizeof(void*));
+	}
+	return NULL;
+}
 
 static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
 {
@@ -1027,25 +1122,39 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 {
 	struct scsi_cmnd *sc;
 
-	conn->session->tt->cleanup_cmd_task(conn, ctask);
-	iscsi_ctask_mtask_cleanup(ctask);
-
 	sc = ctask->sc;
 	if (!sc)
 		return;
+
+	conn->session->tt->cleanup_cmd_task(conn, ctask);
+	iscsi_ctask_mtask_cleanup(ctask);
+
 	sc->result = err;
 	sc->resid = sc->request_bufflen;
-	iscsi_complete_command(conn->session, ctask);
+	/* release ref from queuecommand */
+	__iscsi_put_ctask(ctask);
 }
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
 {
-	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
-	struct iscsi_conn *conn = ctask->conn;
-	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd_task *pending_ctask;
+	struct iscsi_cmd_task *ctask;
+	struct iscsi_conn *conn;
+	struct iscsi_session *session;
 	int rc;
 
+	/*
+	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+	 * got the command.
+	 */
+	if (!sc->SCp.ptr) {
+		debug_scsi("sc never reached iscsi layer or it completed.\n");
+		return SUCCESS;
+	}
+
+	ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
+	conn = ctask->conn;
+	session = conn->session;
+
 	conn->eh_abort_cnt++;
 	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
 
@@ -1061,8 +1170,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		goto failed;
 
 	/* ctask completed before time out */
-	if (!ctask->sc)
-		goto success;
+	if (!ctask->sc) {
+		spin_unlock_bh(&session->lock);
+		debug_scsi("sc completed while abort in progress\n");
+		goto success_rel_mutex;
+	}
 
 	/* what should we do here ? */
 	if (conn->ctask == ctask) {
@@ -1071,17 +1183,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		goto failed;
 	}
 
-	/* check for the easy pending cmd abort */
-	pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt);
-	if (pending_ctask) {
-		/* iscsi_tcp queues write transfers on the xmitqueue */
-		if (list_empty(&pending_ctask->running)) {
-			debug_scsi("found pending task\n");
-			goto success;
-		} else
-			__kfifo_put(conn->xmitqueue, (void*)&pending_ctask,
-				    sizeof(void*));
-	}
+	if (ctask->state == ISCSI_TASK_PENDING)
+		goto success_cleanup;
 
 	conn->tmabort_state = TMABORT_INITIAL;
 
@@ -1089,25 +1192,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	rc = iscsi_exec_abort_task(sc, ctask);
 	spin_lock_bh(&session->lock);
 
-	iscsi_ctask_mtask_cleanup(ctask);
 	if (rc || sc->SCp.phase != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
 		goto failed;
+	iscsi_ctask_mtask_cleanup(ctask);
 
-	/* ctask completed before tmf abort response */
-	if (!ctask->sc) {
-		debug_scsi("sc completed while abort in progress\n");
-		goto success;
+	switch (conn->tmabort_state) {
+	case TMABORT_SUCCESS:
+		goto success_cleanup;
+	case TMABORT_NOT_FOUND:
1101 } 1204 if (!ctask->sc) {
1102 1205 /* ctask completed before tmf abort response */
1103 if (conn->tmabort_state != TMABORT_SUCCESS) { 1206 spin_unlock_bh(&session->lock);
1207 debug_scsi("sc completed while abort in progress\n");
1208 goto success_rel_mutex;
1209 }
1210 /* fall through */
1211 default:
1212 /* timedout or failed */
1104 spin_unlock_bh(&session->lock); 1213 spin_unlock_bh(&session->lock);
1105 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1214 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1106 spin_lock_bh(&session->lock); 1215 spin_lock_bh(&session->lock);
1107 goto failed; 1216 goto failed;
1108 } 1217 }
1109 1218
1110success: 1219success_cleanup:
1111 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); 1220 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1112 spin_unlock_bh(&session->lock); 1221 spin_unlock_bh(&session->lock);
1113 1222
@@ -1121,6 +1230,7 @@ success:
1121 spin_unlock(&session->lock); 1230 spin_unlock(&session->lock);
1122 write_unlock_bh(conn->recv_lock); 1231 write_unlock_bh(conn->recv_lock);
1123 1232
1233success_rel_mutex:
1124 mutex_unlock(&conn->xmitmutex); 1234 mutex_unlock(&conn->xmitmutex);
1125 return SUCCESS; 1235 return SUCCESS;
1126 1236
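[Note: the rework above gives iscsi_eh_abort two success exits: success_cleanup for paths that still hold session->lock (and must flush per-command state before unlocking) and success_rel_mutex for paths that already dropped it, so every exit releases exactly the locks it holds. A userspace sketch of that staged-unlock pattern; the pthread mutexes are stand-ins for the session lock and xmit mutex.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmitmutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

static int eh_abort(int completed_early)
{
	pthread_mutex_lock(&xmitmutex);
	pthread_mutex_lock(&session_lock);

	if (completed_early) {
		/* command already done: nothing to clean up under the lock */
		pthread_mutex_unlock(&session_lock);
		goto success_rel_mutex;
	}

	/* success_cleanup path: still under session_lock */
	/* ... flush per-command state here ... */
	pthread_mutex_unlock(&session_lock);

success_rel_mutex:
	pthread_mutex_unlock(&xmitmutex);
	return 0;
}

int main(void)
{
	printf("%d %d\n", eh_abort(0), eh_abort(1));
	return 0;
}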
@@ -1263,6 +1373,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1263 if (cmd_task_size) 1373 if (cmd_task_size)
1264 ctask->dd_data = &ctask[1]; 1374 ctask->dd_data = &ctask[1];
1265 ctask->itt = cmd_i; 1375 ctask->itt = cmd_i;
1376 INIT_LIST_HEAD(&ctask->running);
1266 } 1377 }
1267 1378
1268 spin_lock_init(&session->lock); 1379 spin_lock_init(&session->lock);
@@ -1282,18 +1393,24 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1282 if (mgmt_task_size) 1393 if (mgmt_task_size)
1283 mtask->dd_data = &mtask[1]; 1394 mtask->dd_data = &mtask[1];
1284 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; 1395 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
1396 INIT_LIST_HEAD(&mtask->running);
1285 } 1397 }
1286 1398
1287 if (scsi_add_host(shost, NULL)) 1399 if (scsi_add_host(shost, NULL))
1288 goto add_host_fail; 1400 goto add_host_fail;
1289 1401
1402 if (!try_module_get(iscsit->owner))
1403 goto cls_session_fail;
1404
1290 cls_session = iscsi_create_session(shost, iscsit, 0); 1405 cls_session = iscsi_create_session(shost, iscsit, 0);
1291 if (!cls_session) 1406 if (!cls_session)
1292 goto cls_session_fail; 1407 goto module_put;
1293 *(unsigned long*)shost->hostdata = (unsigned long)cls_session; 1408 *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
1294 1409
1295 return cls_session; 1410 return cls_session;
1296 1411
1412module_put:
1413 module_put(iscsit->owner);
1297cls_session_fail: 1414cls_session_fail:
1298 scsi_remove_host(shost); 1415 scsi_remove_host(shost);
1299add_host_fail: 1416add_host_fail:
@@ -1317,14 +1434,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1317{ 1434{
1318 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 1435 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1319 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 1436 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1437 struct module *owner = cls_session->transport->owner;
1320 1438
1321 scsi_remove_host(shost); 1439 scsi_remove_host(shost);
1322 1440
1323 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 1441 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
1324 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 1442 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
1325 1443
1444 kfree(session->targetname);
1445
1326 iscsi_destroy_session(cls_session); 1446 iscsi_destroy_session(cls_session);
1327 scsi_host_put(shost); 1447 scsi_host_put(shost);
1448 module_put(owner);
1328} 1449}
1329EXPORT_SYMBOL_GPL(iscsi_session_teardown); 1450EXPORT_SYMBOL_GPL(iscsi_session_teardown);
1330 1451
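[Note: setup now pins the transport module with try_module_get before creating the class session, and teardown caches the owner and drops the reference only after the host is gone, so the transport cannot unload under a live session. A userspace sketch of the pairing and its reverse-order unwinding; try_get/put model try_module_get/module_put.]

#include <stdbool.h>
#include <stdio.h>

static int refs;

static bool try_get(void) { refs++; return true; }	/* can fail for real modules */
static void put(void)     { refs--; }

static int session_setup(bool create_fails)
{
	if (!try_get())
		return -1;		/* nothing to unwind yet */
	if (create_fails)
		goto out_put;		/* undo only what succeeded */
	return 0;

out_put:
	put();
	return -1;
}

int main(void)
{
	session_setup(true);
	printf("refs after failed setup: %d\n", refs);	/* 0 */
	return 0;
}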
@@ -1355,12 +1476,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1355 conn->tmabort_state = TMABORT_INITIAL; 1476 conn->tmabort_state = TMABORT_INITIAL;
1356 INIT_LIST_HEAD(&conn->run_list); 1477 INIT_LIST_HEAD(&conn->run_list);
1357 INIT_LIST_HEAD(&conn->mgmt_run_list); 1478 INIT_LIST_HEAD(&conn->mgmt_run_list);
1358 1479 INIT_LIST_HEAD(&conn->xmitqueue);
1359 /* initialize general xmit PDU commands queue */
1360 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
1361 GFP_KERNEL, NULL);
1362 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
1363 goto xmitqueue_alloc_fail;
1364 1480
1365 /* initialize general immediate & non-immediate PDU commands queue */ 1481 /* initialize general immediate & non-immediate PDU commands queue */
1366 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), 1482 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
@@ -1388,7 +1504,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1388 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); 1504 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
1389 if (!data) 1505 if (!data)
1390 goto login_mtask_data_alloc_fail; 1506 goto login_mtask_data_alloc_fail;
1391 conn->login_mtask->data = data; 1507 conn->login_mtask->data = conn->data = data;
1392 1508
1393 init_timer(&conn->tmabort_timer); 1509 init_timer(&conn->tmabort_timer);
1394 mutex_init(&conn->xmitmutex); 1510 mutex_init(&conn->xmitmutex);
@@ -1404,8 +1520,6 @@ login_mtask_alloc_fail:
1404mgmtqueue_alloc_fail: 1520mgmtqueue_alloc_fail:
1405 kfifo_free(conn->immqueue); 1521 kfifo_free(conn->immqueue);
1406immqueue_alloc_fail: 1522immqueue_alloc_fail:
1407 kfifo_free(conn->xmitqueue);
1408xmitqueue_alloc_fail:
1409 iscsi_destroy_conn(cls_conn); 1523 iscsi_destroy_conn(cls_conn);
1410 return NULL; 1524 return NULL;
1411} 1525}
@@ -1426,12 +1540,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1426 1540
1427 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1541 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1428 mutex_lock(&conn->xmitmutex); 1542 mutex_lock(&conn->xmitmutex);
1429 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) {
1430 if (session->tt->suspend_conn_recv)
1431 session->tt->suspend_conn_recv(conn);
1432
1433 session->tt->terminate_conn(conn);
1434 }
1435 1543
1436 spin_lock_bh(&session->lock); 1544 spin_lock_bh(&session->lock);
1437 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 1545 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
@@ -1468,7 +1576,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1468 } 1576 }
1469 1577
1470 spin_lock_bh(&session->lock); 1578 spin_lock_bh(&session->lock);
1471 kfree(conn->login_mtask->data); 1579 kfree(conn->data);
1580 kfree(conn->persistent_address);
1472 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, 1581 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
1473 sizeof(void*)); 1582 sizeof(void*));
1474 list_del(&conn->item); 1583 list_del(&conn->item);
@@ -1483,7 +1592,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1483 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; 1592 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
1484 spin_unlock_bh(&session->lock); 1593 spin_unlock_bh(&session->lock);
1485 1594
1486 kfifo_free(conn->xmitqueue);
1487 kfifo_free(conn->immqueue); 1595 kfifo_free(conn->immqueue);
1488 kfifo_free(conn->mgmtqueue); 1596 kfifo_free(conn->mgmtqueue);
1489 1597
@@ -1496,11 +1604,19 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
1496 struct iscsi_conn *conn = cls_conn->dd_data; 1604 struct iscsi_conn *conn = cls_conn->dd_data;
1497 struct iscsi_session *session = conn->session; 1605 struct iscsi_session *session = conn->session;
1498 1606
1499 if (session == NULL) { 1607 if (!session) {
1500 printk(KERN_ERR "iscsi: can't start unbound connection\n"); 1608 printk(KERN_ERR "iscsi: can't start unbound connection\n");
1501 return -EPERM; 1609 return -EPERM;
1502 } 1610 }
1503 1611
1612 if ((session->imm_data_en || !session->initial_r2t_en) &&
1613 session->first_burst > session->max_burst) {
1614 printk("iscsi: invalid burst lengths: "
1615 "first_burst %d max_burst %d\n",
1616 session->first_burst, session->max_burst);
1617 return -EINVAL;
1618 }
1619
1504 spin_lock_bh(&session->lock); 1620 spin_lock_bh(&session->lock);
1505 conn->c_stage = ISCSI_CONN_STARTED; 1621 conn->c_stage = ISCSI_CONN_STARTED;
1506 session->state = ISCSI_STATE_LOGGED_IN; 1622 session->state = ISCSI_STATE_LOGGED_IN;
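[Note: the new check rejects a FirstBurstLength larger than MaxBurstLength, but only when unsolicited data is actually possible, i.e. ImmediateData=Yes or InitialR2T=No; otherwise first_burst is never used on the wire. A standalone sketch of the same predicate with a worked pair of values.]

#include <stdio.h>

static int burst_lengths_valid(int imm_data_en, int initial_r2t_en,
			       int first_burst, int max_burst)
{
	if ((imm_data_en || !initial_r2t_en) && first_burst > max_burst)
		return 0;
	return 1;
}

int main(void)
{
	/* 256k first burst against a 64k max burst: rejected */
	printf("%d\n", burst_lengths_valid(1, 1, 262144, 65536));	/* 0 */
	/* same lengths, but InitialR2T=Yes and no immediate data: allowed */
	printf("%d\n", burst_lengths_valid(0, 1, 262144, 65536));	/* 1 */
	return 0;
}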
@@ -1566,7 +1682,7 @@ static void fail_all_commands(struct iscsi_conn *conn)
1566 struct iscsi_cmd_task *ctask, *tmp; 1682 struct iscsi_cmd_task *ctask, *tmp;
1567 1683
1568 /* flush pending */ 1684 /* flush pending */
1569 while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { 1685 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1570 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, 1686 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
1571 ctask->itt); 1687 ctask->itt);
1572 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1688 fail_command(conn, ctask, DID_BUS_BUSY << 16);
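[Note: with xmitqueue converted from a kfifo to a list_head, the flush uses list_for_each_entry_safe, which caches the next pointer because fail_command unlinks and releases the current entry under the iterator. A userspace sketch of why the plain walker would be unsafe; a toy singly linked list stands in for list_head, and the sketch assumes head-order removal.]

#include <stdio.h>
#include <stdlib.h>

struct ctask {
	int itt;
	struct ctask *next;
};

static void fail_command(struct ctask **queue, struct ctask *t)
{
	*queue = t->next;	/* unlink; sketch assumes t is the head */
	free(t);
}

int main(void)
{
	struct ctask *queue = NULL;

	for (int i = 2; i >= 0; i--) {		/* build itt 0, 1, 2 */
		struct ctask *t = malloc(sizeof(*t));
		t->itt = i;
		t->next = queue;
		queue = t;
	}

	/* "safe" walk: grab next before fail_command frees the node */
	for (struct ctask *t = queue, *tmp; t; t = tmp) {
		tmp = t->next;
		printf("failing itt %d\n", t->itt);
		fail_command(&queue, t);
	}
	return 0;
}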
@@ -1609,8 +1725,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1609 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1725 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1610 spin_unlock_bh(&session->lock); 1726 spin_unlock_bh(&session->lock);
1611 1727
1612 if (session->tt->suspend_conn_recv) 1728 write_lock_bh(conn->recv_lock);
1613 session->tt->suspend_conn_recv(conn); 1729 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1730 write_unlock_bh(conn->recv_lock);
1614 1731
1615 mutex_lock(&conn->xmitmutex); 1732 mutex_lock(&conn->xmitmutex);
1616 /* 1733 /*
@@ -1629,7 +1746,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1629 } 1746 }
1630 } 1747 }
1631 1748
1632 session->tt->terminate_conn(conn);
1633 /* 1749 /*
1634 * flush queues. 1750 * flush queues.
1635 */ 1751 */
@@ -1697,6 +1813,185 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
1697} 1813}
1698EXPORT_SYMBOL_GPL(iscsi_conn_bind); 1814EXPORT_SYMBOL_GPL(iscsi_conn_bind);
1699 1815
1816
1817int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
1818 enum iscsi_param param, char *buf, int buflen)
1819{
1820 struct iscsi_conn *conn = cls_conn->dd_data;
1821 struct iscsi_session *session = conn->session;
1822 uint32_t value;
1823
1824 switch(param) {
1825 case ISCSI_PARAM_MAX_RECV_DLENGTH:
1826 sscanf(buf, "%d", &conn->max_recv_dlength);
1827 break;
1828 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1829 sscanf(buf, "%d", &conn->max_xmit_dlength);
1830 break;
1831 case ISCSI_PARAM_HDRDGST_EN:
1832 sscanf(buf, "%d", &conn->hdrdgst_en);
1833 break;
1834 case ISCSI_PARAM_DATADGST_EN:
1835 sscanf(buf, "%d", &conn->datadgst_en);
1836 break;
1837 case ISCSI_PARAM_INITIAL_R2T_EN:
1838 sscanf(buf, "%d", &session->initial_r2t_en);
1839 break;
1840 case ISCSI_PARAM_MAX_R2T:
1841 sscanf(buf, "%d", &session->max_r2t);
1842 break;
1843 case ISCSI_PARAM_IMM_DATA_EN:
1844 sscanf(buf, "%d", &session->imm_data_en);
1845 break;
1846 case ISCSI_PARAM_FIRST_BURST:
1847 sscanf(buf, "%d", &session->first_burst);
1848 break;
1849 case ISCSI_PARAM_MAX_BURST:
1850 sscanf(buf, "%d", &session->max_burst);
1851 break;
1852 case ISCSI_PARAM_PDU_INORDER_EN:
1853 sscanf(buf, "%d", &session->pdu_inorder_en);
1854 break;
1855 case ISCSI_PARAM_DATASEQ_INORDER_EN:
1856 sscanf(buf, "%d", &session->dataseq_inorder_en);
1857 break;
1858 case ISCSI_PARAM_ERL:
1859 sscanf(buf, "%d", &session->erl);
1860 break;
1861 case ISCSI_PARAM_IFMARKER_EN:
1862 sscanf(buf, "%d", &value);
1863 BUG_ON(value);
1864 break;
1865 case ISCSI_PARAM_OFMARKER_EN:
1866 sscanf(buf, "%d", &value);
1867 BUG_ON(value);
1868 break;
1869 case ISCSI_PARAM_EXP_STATSN:
1870 sscanf(buf, "%u", &conn->exp_statsn);
1871 break;
1872 case ISCSI_PARAM_TARGET_NAME:
1873 /* this should not change between logins */
1874 if (session->targetname)
1875 break;
1876
1877 session->targetname = kstrdup(buf, GFP_KERNEL);
1878 if (!session->targetname)
1879 return -ENOMEM;
1880 break;
1881 case ISCSI_PARAM_TPGT:
1882 sscanf(buf, "%d", &session->tpgt);
1883 break;
1884 case ISCSI_PARAM_PERSISTENT_PORT:
1885 sscanf(buf, "%d", &conn->persistent_port);
1886 break;
1887 case ISCSI_PARAM_PERSISTENT_ADDRESS:
1888 /*
1889 * this is the address returned in discovery so it should
1890 * not change between logins.
1891 */
1892 if (conn->persistent_address)
1893 break;
1894
1895 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
1896 if (!conn->persistent_address)
1897 return -ENOMEM;
1898 break;
1899 default:
1900 return -ENOSYS;
1901 }
1902
1903 return 0;
1904}
1905EXPORT_SYMBOL_GPL(iscsi_set_param);
1906
1907int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
1908 enum iscsi_param param, char *buf)
1909{
1910 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1911 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1912 int len;
1913
1914 switch(param) {
1915 case ISCSI_PARAM_INITIAL_R2T_EN:
1916 len = sprintf(buf, "%d\n", session->initial_r2t_en);
1917 break;
1918 case ISCSI_PARAM_MAX_R2T:
1919 len = sprintf(buf, "%hu\n", session->max_r2t);
1920 break;
1921 case ISCSI_PARAM_IMM_DATA_EN:
1922 len = sprintf(buf, "%d\n", session->imm_data_en);
1923 break;
1924 case ISCSI_PARAM_FIRST_BURST:
1925 len = sprintf(buf, "%u\n", session->first_burst);
1926 break;
1927 case ISCSI_PARAM_MAX_BURST:
1928 len = sprintf(buf, "%u\n", session->max_burst);
1929 break;
1930 case ISCSI_PARAM_PDU_INORDER_EN:
1931 len = sprintf(buf, "%d\n", session->pdu_inorder_en);
1932 break;
1933 case ISCSI_PARAM_DATASEQ_INORDER_EN:
1934 len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
1935 break;
1936 case ISCSI_PARAM_ERL:
1937 len = sprintf(buf, "%d\n", session->erl);
1938 break;
1939 case ISCSI_PARAM_TARGET_NAME:
1940 len = sprintf(buf, "%s\n", session->targetname);
1941 break;
1942 case ISCSI_PARAM_TPGT:
1943 len = sprintf(buf, "%d\n", session->tpgt);
1944 break;
1945 default:
1946 return -ENOSYS;
1947 }
1948
1949 return len;
1950}
1951EXPORT_SYMBOL_GPL(iscsi_session_get_param);
1952
1953int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
1954 enum iscsi_param param, char *buf)
1955{
1956 struct iscsi_conn *conn = cls_conn->dd_data;
1957 int len;
1958
1959 switch(param) {
1960 case ISCSI_PARAM_MAX_RECV_DLENGTH:
1961 len = sprintf(buf, "%u\n", conn->max_recv_dlength);
1962 break;
1963 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1964 len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
1965 break;
1966 case ISCSI_PARAM_HDRDGST_EN:
1967 len = sprintf(buf, "%d\n", conn->hdrdgst_en);
1968 break;
1969 case ISCSI_PARAM_DATADGST_EN:
1970 len = sprintf(buf, "%d\n", conn->datadgst_en);
1971 break;
1972 case ISCSI_PARAM_IFMARKER_EN:
1973 len = sprintf(buf, "%d\n", conn->ifmarker_en);
1974 break;
1975 case ISCSI_PARAM_OFMARKER_EN:
1976 len = sprintf(buf, "%d\n", conn->ofmarker_en);
1977 break;
1978 case ISCSI_PARAM_EXP_STATSN:
1979 len = sprintf(buf, "%u\n", conn->exp_statsn);
1980 break;
1981 case ISCSI_PARAM_PERSISTENT_PORT:
1982 len = sprintf(buf, "%d\n", conn->persistent_port);
1983 break;
1984 case ISCSI_PARAM_PERSISTENT_ADDRESS:
1985 len = sprintf(buf, "%s\n", conn->persistent_address);
1986 break;
1987 default:
1988 return -ENOSYS;
1989 }
1990
1991 return len;
1992}
1993EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
1994
1700MODULE_AUTHOR("Mike Christie"); 1995MODULE_AUTHOR("Mike Christie");
1701MODULE_DESCRIPTION("iSCSI library functions"); 1996MODULE_DESCRIPTION("iSCSI library functions");
1702MODULE_LICENSE("GPL"); 1997MODULE_LICENSE("GPL");
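[Note: the three param functions above are straight sscanf/sprintf plumbing: the transport hands each parameter key a text value, iscsi_set_param parses it into the right session or connection field, and the get_param pair prints it back. A trimmed userspace sketch of the same round trip, modeling just two of the keys.]

#include <stdio.h>

enum iscsi_param { PARAM_MAX_RECV_DLENGTH, PARAM_HDRDGST_EN };

struct conn { int max_recv_dlength; int hdrdgst_en; };

static int set_param(struct conn *c, enum iscsi_param p, const char *buf)
{
	switch (p) {
	case PARAM_MAX_RECV_DLENGTH:
		sscanf(buf, "%d", &c->max_recv_dlength);
		break;
	case PARAM_HDRDGST_EN:
		sscanf(buf, "%d", &c->hdrdgst_en);
		break;
	default:
		return -1;	/* -ENOSYS in the kernel version */
	}
	return 0;
}

static int get_param(struct conn *c, enum iscsi_param p, char *buf)
{
	switch (p) {
	case PARAM_MAX_RECV_DLENGTH:
		return sprintf(buf, "%d\n", c->max_recv_dlength);
	case PARAM_HDRDGST_EN:
		return sprintf(buf, "%d\n", c->hdrdgst_en);
	default:
		return -1;
	}
}

int main(void)
{
	struct conn c = { 0, 0 };
	char buf[32];

	set_param(&c, PARAM_MAX_RECV_DLENGTH, "262144");
	get_param(&c, PARAM_MAX_RECV_DLENGTH, buf);
	fputs(buf, stdout);			/* 262144 */
	return 0;
}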
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
new file mode 100644
index 000000000000..aafdc92f8312
--- /dev/null
+++ b/drivers/scsi/libsas/Kconfig
@@ -0,0 +1,39 @@
1#
2# Kernel configuration file for the SAS Class
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23#
24
25config SCSI_SAS_LIBSAS
26 tristate "SAS Domain Transport Attributes"
27 depends on SCSI
28 select SCSI_SAS_ATTRS
29 help
30 This provides transport specific helpers for SAS drivers which
31	use the domain device construct (like the aic94xx).
32
33config SCSI_SAS_LIBSAS_DEBUG
34 bool "Compile the SAS Domain Transport Attributes in debug mode"
35 default y
36 depends on SCSI_SAS_LIBSAS
37 help
38 Compiles the SAS Layer in debug mode. In debug mode, the
39 SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
new file mode 100644
index 000000000000..44d972a3b4bd
--- /dev/null
+++ b/drivers/scsi/libsas/Makefile
@@ -0,0 +1,36 @@
1#
2# Kernel Makefile for the libsas helpers
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DSAS_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
29libsas-y += sas_init.o \
30 sas_phy.o \
31 sas_port.o \
32 sas_event.o \
33 sas_dump.o \
34 sas_discover.o \
35 sas_expander.o \
36 sas_scsi_host.o
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
new file mode 100644
index 000000000000..d977bd492d8d
--- /dev/null
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -0,0 +1,749 @@
1/*
2 * Serial Attached SCSI (SAS) Discover process
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <linux/pci.h>
26#include <linux/scatterlist.h>
27#include <scsi/scsi_host.h>
28#include <scsi/scsi_eh.h>
29#include "sas_internal.h"
30
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_transport_sas.h>
33#include "../scsi_sas_internal.h"
34
35/* ---------- Basic task processing for discovery purposes ---------- */
36
37void sas_init_dev(struct domain_device *dev)
38{
39 INIT_LIST_HEAD(&dev->siblings);
40 INIT_LIST_HEAD(&dev->dev_list_node);
41 switch (dev->dev_type) {
42 case SAS_END_DEV:
43 break;
44 case EDGE_DEV:
45 case FANOUT_DEV:
46 INIT_LIST_HEAD(&dev->ex_dev.children);
47 break;
48 case SATA_DEV:
49 case SATA_PM:
50 case SATA_PM_PORT:
51 INIT_LIST_HEAD(&dev->sata_dev.children);
52 break;
53 default:
54 break;
55 }
56}
57
58static void sas_task_timedout(unsigned long _task)
59{
60 struct sas_task *task = (void *) _task;
61 unsigned long flags;
62
63 spin_lock_irqsave(&task->task_state_lock, flags);
64 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
65 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
66 spin_unlock_irqrestore(&task->task_state_lock, flags);
67
68 complete(&task->completion);
69}
70
71static void sas_disc_task_done(struct sas_task *task)
72{
73 if (!del_timer(&task->timer))
74 return;
75 complete(&task->completion);
76}
77
78#define SAS_DEV_TIMEOUT 10
79
80/**
81 * sas_execute_task -- Basic task processing for discovery
82 * @task: the task to be executed
83 * @buffer: pointer to buffer to do I/O
84 * @size: size of @buffer
85 * @pci_dma_dir: PCI_DMA_...
86 */
87static int sas_execute_task(struct sas_task *task, void *buffer, int size,
88 int pci_dma_dir)
89{
90 int res = 0;
91 struct scatterlist *scatter = NULL;
92 struct task_status_struct *ts = &task->task_status;
93 int num_scatter = 0;
94 int retries = 0;
95 struct sas_internal *i =
96 to_sas_internal(task->dev->port->ha->core.shost->transportt);
97
98 if (pci_dma_dir != PCI_DMA_NONE) {
99 scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
100 if (!scatter)
101 goto out;
102
103 sg_init_one(scatter, buffer, size);
104 num_scatter = 1;
105 }
106
107 task->task_proto = task->dev->tproto;
108 task->scatter = scatter;
109 task->num_scatter = num_scatter;
110 task->total_xfer_len = size;
111 task->data_dir = pci_dma_dir;
112 task->task_done = sas_disc_task_done;
113
114 for (retries = 0; retries < 5; retries++) {
115 task->task_state_flags = SAS_TASK_STATE_PENDING;
116 init_completion(&task->completion);
117
118 task->timer.data = (unsigned long) task;
119 task->timer.function = sas_task_timedout;
120 task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
121 add_timer(&task->timer);
122
123 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
124 if (res) {
125 del_timer(&task->timer);
126 SAS_DPRINTK("executing SAS discovery task failed:%d\n",
127 res);
128 goto ex_err;
129 }
130 wait_for_completion(&task->completion);
131 res = -ETASK;
132 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
133 int res2;
134 SAS_DPRINTK("task aborted, flags:0x%x\n",
135 task->task_state_flags);
136 res2 = i->dft->lldd_abort_task(task);
137 SAS_DPRINTK("came back from abort task\n");
138 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
139 if (res2 == TMF_RESP_FUNC_COMPLETE)
140 continue; /* Retry the task */
141 else
142 goto ex_err;
143 }
144 }
145 if (task->task_status.stat == SAM_BUSY ||
146 task->task_status.stat == SAM_TASK_SET_FULL ||
147 task->task_status.stat == SAS_QUEUE_FULL) {
148 SAS_DPRINTK("task: q busy, sleeping...\n");
149 schedule_timeout_interruptible(HZ);
150 } else if (task->task_status.stat == SAM_CHECK_COND) {
151 struct scsi_sense_hdr shdr;
152
153 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
154 &shdr)) {
155 SAS_DPRINTK("couldn't normalize sense\n");
156 continue;
157 }
158 if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
159 (shdr.sense_key == 2 && shdr.asc == 4 &&
160 shdr.ascq == 1)) {
161 SAS_DPRINTK("device %016llx LUN: %016llx "
162 "powering up or not ready yet, "
163 "sleeping...\n",
164 SAS_ADDR(task->dev->sas_addr),
165 SAS_ADDR(task->ssp_task.LUN));
166
167 schedule_timeout_interruptible(5*HZ);
168 } else if (shdr.sense_key == 1) {
169 res = 0;
170 break;
171 } else if (shdr.sense_key == 5) {
172 break;
173 } else {
174 SAS_DPRINTK("dev %016llx LUN: %016llx "
175 "sense key:0x%x ASC:0x%x ASCQ:0x%x"
176 "\n",
177 SAS_ADDR(task->dev->sas_addr),
178 SAS_ADDR(task->ssp_task.LUN),
179 shdr.sense_key,
180 shdr.asc, shdr.ascq);
181 }
182 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
183 task->task_status.stat != SAM_GOOD) {
184 SAS_DPRINTK("task finished with resp:0x%x, "
185 "stat:0x%x\n",
186 task->task_status.resp,
187 task->task_status.stat);
188 goto ex_err;
189 } else {
190 res = 0;
191 break;
192 }
193 }
194ex_err:
195 if (pci_dma_dir != PCI_DMA_NONE)
196 kfree(scatter);
197out:
198 return res;
199}
200
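[Note: sas_execute_task arms a kernel timer before dispatching the task; either the timer fires and marks SAS_TASK_STATE_ABORTED, or the done callback deactivates it with del_timer, and whichever side wins signals the same completion exactly once. A userspace sketch of that race, with pthread primitives as stand-ins for the timer/completion pair.]

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int finished, aborted;

static void *watchdog(void *arg)
{
	sleep(1);			/* SAS_DEV_TIMEOUT stand-in */
	pthread_mutex_lock(&lock);
	if (!finished) {
		aborted = 1;		/* models SAS_TASK_STATE_ABORTED */
		finished = 1;
		pthread_cond_signal(&done_cv);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void task_done(void)		/* normal completion path */
{
	pthread_mutex_lock(&lock);
	if (!finished) {		/* "del_timer() succeeded" */
		finished = 1;
		pthread_cond_signal(&done_cv);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, watchdog, NULL);
	task_done();			/* completes before the watchdog */

	pthread_mutex_lock(&lock);
	while (!finished)
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);

	printf("aborted=%d\n", aborted);	/* 0: completed in time */
	pthread_join(t, NULL);
	return 0;
}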
201/* ---------- Domain device discovery ---------- */
202
203/**
204 * sas_get_port_device -- Discover devices which caused port creation
205 * @port: pointer to struct sas_port of interest
206 *
207 * Devices directly attached to an HA port have no parent. This is
208 * how we know they are (domain) "root" devices. All other devices
209 * do, and should have their "parent" pointer set appropriately as
210 * soon as a child device is discovered.
211 */
212static int sas_get_port_device(struct asd_sas_port *port)
213{
214 unsigned long flags;
215 struct asd_sas_phy *phy;
216 struct sas_rphy *rphy;
217 struct domain_device *dev;
218
219 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
220 if (!dev)
221 return -ENOMEM;
222
223 spin_lock_irqsave(&port->phy_list_lock, flags);
224 if (list_empty(&port->phy_list)) {
225 spin_unlock_irqrestore(&port->phy_list_lock, flags);
226 kfree(dev);
227 return -ENODEV;
228 }
229 phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
230 spin_lock(&phy->frame_rcvd_lock);
231 memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
232 (size_t)phy->frame_rcvd_size));
233 spin_unlock(&phy->frame_rcvd_lock);
234 spin_unlock_irqrestore(&port->phy_list_lock, flags);
235
236 if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
237 struct dev_to_host_fis *fis =
238 (struct dev_to_host_fis *) dev->frame_rcvd;
239 if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
240 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
241 && (fis->device & ~0x10) == 0)
242 dev->dev_type = SATA_PM;
243 else
244 dev->dev_type = SATA_DEV;
245 dev->tproto = SATA_PROTO;
246 } else {
247 struct sas_identify_frame *id =
248 (struct sas_identify_frame *) dev->frame_rcvd;
249 dev->dev_type = id->dev_type;
250 dev->iproto = id->initiator_bits;
251 dev->tproto = id->target_bits;
252 }
253
254 sas_init_dev(dev);
255
256 switch (dev->dev_type) {
257 case SAS_END_DEV:
258 rphy = sas_end_device_alloc(port->port);
259 break;
260 case EDGE_DEV:
261 rphy = sas_expander_alloc(port->port,
262 SAS_EDGE_EXPANDER_DEVICE);
263 break;
264 case FANOUT_DEV:
265 rphy = sas_expander_alloc(port->port,
266 SAS_FANOUT_EXPANDER_DEVICE);
267 break;
268 case SATA_DEV:
269 default:
270 printk("ERROR: Unidentified device type %d\n", dev->dev_type);
271 rphy = NULL;
272 break;
273 }
274
275 if (!rphy) {
276 kfree(dev);
277 return -ENODEV;
278 }
279 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
280 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
281 sas_fill_in_rphy(dev, rphy);
282 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
283 port->port_dev = dev;
284 dev->port = port;
285 dev->linkrate = port->linkrate;
286 dev->min_linkrate = port->linkrate;
287 dev->max_linkrate = port->linkrate;
288 dev->pathways = port->num_phys;
289 memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
290 memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
291 memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
292 port->disc.max_level = 0;
293
294 dev->rphy = rphy;
295 spin_lock(&port->dev_list_lock);
296 list_add_tail(&dev->dev_list_node, &port->dev_list);
297 spin_unlock(&port->dev_list_lock);
298
299 return 0;
300}
301
302/* ---------- Discover and Revalidate ---------- */
303
304/* ---------- SATA ---------- */
305
306static void sas_get_ata_command_set(struct domain_device *dev)
307{
308 struct dev_to_host_fis *fis =
309 (struct dev_to_host_fis *) dev->frame_rcvd;
310
311 if ((fis->sector_count == 1 && /* ATA */
312 fis->lbal == 1 &&
313 fis->lbam == 0 &&
314 fis->lbah == 0 &&
315 fis->device == 0)
316 ||
317 (fis->sector_count == 0 && /* CE-ATA (mATA) */
318 fis->lbal == 0 &&
319 fis->lbam == 0xCE &&
320 fis->lbah == 0xAA &&
321 (fis->device & ~0x10) == 0))
322
323 dev->sata_dev.command_set = ATA_COMMAND_SET;
324
325 else if ((fis->interrupt_reason == 1 && /* ATAPI */
326 fis->lbal == 1 &&
327 fis->byte_count_low == 0x14 &&
328 fis->byte_count_high == 0xEB &&
329 (fis->device & ~0x10) == 0))
330
331 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
332
333 else if ((fis->sector_count == 1 && /* SEMB */
334 fis->lbal == 1 &&
335 fis->lbam == 0x3C &&
336 fis->lbah == 0xC3 &&
337 fis->device == 0)
338 ||
339 (fis->interrupt_reason == 1 && /* SATA PM */
340 fis->lbal == 1 &&
341 fis->byte_count_low == 0x69 &&
342 fis->byte_count_high == 0x96 &&
343 (fis->device & ~0x10) == 0))
344
345 /* Treat it as a superset? */
346 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
347}
348
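[Note: the classification above keys off the device signature that the initial Register D2H FIS carries in its count/LBA fields. A minimal userspace sketch of the same test; the struct is trimmed to just the fields compared, so treat the stand-in layout as illustrative, while the constants come from the code above.]

#include <stdio.h>
#include <stdint.h>

struct d2h_fis {
	uint8_t sector_count, lbal, lbam, lbah, device;
	uint8_t interrupt_reason, byte_count_low, byte_count_high;
};

static const char *classify(const struct d2h_fis *f)
{
	if (f->sector_count == 1 && f->lbal == 1 &&
	    f->lbam == 0 && f->lbah == 0 && f->device == 0)
		return "ATA";
	if (f->interrupt_reason == 1 && f->lbal == 1 &&
	    f->byte_count_low == 0x14 && f->byte_count_high == 0xEB)
		return "ATAPI";
	if (f->interrupt_reason == 1 && f->lbal == 1 &&
	    f->byte_count_low == 0x69 && f->byte_count_high == 0x96)
		return "SATA PM";
	return "unknown";
}

int main(void)
{
	struct d2h_fis ata = { 1, 1, 0, 0, 0, 0, 0, 0 };

	printf("%s\n", classify(&ata));		/* ATA */
	return 0;
}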
349/**
350 * sas_issue_ata_cmd -- Basic SATA command processing for discovery
351 * @dev: the device to send the command to
352 * @command: the command register
353 * @features: the features register
354 * @buffer: pointer to buffer to do I/O
355 * @size: size of @buffer
356 * @pci_dma_dir: PCI_DMA_...
357 */
358static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
359 u8 features, void *buffer, int size,
360 int pci_dma_dir)
361{
362 int res = 0;
363 struct sas_task *task;
364 struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
365 &dev->frame_rcvd[0];
366
367 res = -ENOMEM;
368 task = sas_alloc_task(GFP_KERNEL);
369 if (!task)
370 goto out;
371
372 task->dev = dev;
373
374 task->ata_task.fis.command = command;
375 task->ata_task.fis.features = features;
376 task->ata_task.fis.device = d2h_fis->device;
377 task->ata_task.retry_count = 1;
378
379 res = sas_execute_task(task, buffer, size, pci_dma_dir);
380
381 sas_free_task(task);
382out:
383 return res;
384}
385
386static void sas_sata_propagate_sas_addr(struct domain_device *dev)
387{
388 unsigned long flags;
389 struct asd_sas_port *port = dev->port;
390 struct asd_sas_phy *phy;
391
392 BUG_ON(dev->parent);
393
394 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
395 spin_lock_irqsave(&port->phy_list_lock, flags);
396 list_for_each_entry(phy, &port->phy_list, port_phy_el)
397 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
398 spin_unlock_irqrestore(&port->phy_list_lock, flags);
399}
400
401#define ATA_IDENTIFY_DEV 0xEC
402#define ATA_IDENTIFY_PACKET_DEV 0xA1
403#define ATA_SET_FEATURES 0xEF
404#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
405
406/**
407 * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
408 * @dev: STP/SATA device of interest (ATA/ATAPI)
409 *
410 * The LLDD has already been notified of this device, so that we can
411 * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
412 * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
413 * performance for this device.
414 */
415static int sas_discover_sata_dev(struct domain_device *dev)
416{
417 int res;
418 __le16 *identify_x;
419 u8 command;
420
421 identify_x = kzalloc(512, GFP_KERNEL);
422 if (!identify_x)
423 return -ENOMEM;
424
425 if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
426 dev->sata_dev.identify_device = identify_x;
427 command = ATA_IDENTIFY_DEV;
428 } else {
429 dev->sata_dev.identify_packet_device = identify_x;
430 command = ATA_IDENTIFY_PACKET_DEV;
431 }
432
433 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
434 PCI_DMA_FROMDEVICE);
435 if (res)
436 goto out_err;
437
438 /* lives on the media? */
439 if (le16_to_cpu(identify_x[0]) & 4) {
440 /* incomplete response */
441 SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
442 "dev %llx\n", SAS_ADDR(dev->sas_addr));
443	if (!(le16_to_cpu(identify_x[83]) & (1<<6)))
444 goto cont1;
445 res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
446 ATA_FEATURE_PUP_STBY_SPIN_UP,
447 NULL, 0, PCI_DMA_NONE);
448 if (res)
449 goto cont1;
450
451 schedule_timeout_interruptible(5*HZ); /* More time? */
452 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
453 PCI_DMA_FROMDEVICE);
454 if (res)
455 goto out_err;
456 }
457cont1:
458 /* Get WWN */
459 if (dev->port->oob_mode != SATA_OOB_MODE) {
460 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
461 SAS_ADDR_SIZE);
462 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
463 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
464 == 0x5000) {
465 int i;
466
467 for (i = 0; i < 4; i++) {
468 dev->sas_addr[2*i] =
469 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
470 dev->sas_addr[2*i+1] =
471 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
472 }
473 }
474 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
475 if (!dev->parent)
476 sas_sata_propagate_sas_addr(dev);
477
478 /* XXX Hint: register this SATA device with SATL.
479 When this returns, dev->sata_dev->lu is alive and
480 present.
481 sas_satl_register_dev(dev);
482 */
483 return 0;
484out_err:
485 dev->sata_dev.identify_packet_device = NULL;
486 dev->sata_dev.identify_device = NULL;
487 kfree(identify_x);
488 return res;
489}
490
491static int sas_discover_sata_pm(struct domain_device *dev)
492{
493 return -ENODEV;
494}
495
496int sas_notify_lldd_dev_found(struct domain_device *dev)
497{
498 int res = 0;
499 struct sas_ha_struct *sas_ha = dev->port->ha;
500 struct Scsi_Host *shost = sas_ha->core.shost;
501 struct sas_internal *i = to_sas_internal(shost->transportt);
502
503 if (i->dft->lldd_dev_found) {
504 res = i->dft->lldd_dev_found(dev);
505 if (res) {
506 printk("sas: driver on pcidev %s cannot handle "
507 "device %llx, error:%d\n",
508 pci_name(sas_ha->pcidev),
509 SAS_ADDR(dev->sas_addr), res);
510 }
511 }
512 return res;
513}
514
515
516void sas_notify_lldd_dev_gone(struct domain_device *dev)
517{
518 struct sas_ha_struct *sas_ha = dev->port->ha;
519 struct Scsi_Host *shost = sas_ha->core.shost;
520 struct sas_internal *i = to_sas_internal(shost->transportt);
521
522 if (i->dft->lldd_dev_gone)
523 i->dft->lldd_dev_gone(dev);
524}
525
526/* ---------- Common/dispatchers ---------- */
527
528/**
529 * sas_discover_sata -- discover an STP/SATA domain device
530 * @dev: pointer to struct domain_device of interest
531 *
532 * First we notify the LLDD of this device, so we can send frames to
533 * it. Then depending on the type of device we call the appropriate
534 * discover functions. Once device discover is done, we notify the
535 * LLDD so that it can fine-tune its parameters for the device, by
536 * removing it and then adding it. That is, the second time around,
537 * the fields the driver looks at will already be set.
538 * Finally we initialize the kobj so that the device can be added to
539 * the system at registration time. Devices directly attached to an HA
540 * port have no parents. All other devices do, and should have their
541 * "parent" pointer set appropriately before calling this function.
542 */
543int sas_discover_sata(struct domain_device *dev)
544{
545 int res;
546
547 sas_get_ata_command_set(dev);
548
549 res = sas_notify_lldd_dev_found(dev);
550 if (res)
551 return res;
552
553 switch (dev->dev_type) {
554 case SATA_DEV:
555 res = sas_discover_sata_dev(dev);
556 break;
557 case SATA_PM:
558 res = sas_discover_sata_pm(dev);
559 break;
560 default:
561 break;
562 }
563
564 sas_notify_lldd_dev_gone(dev);
565 if (!res) {
566 sas_notify_lldd_dev_found(dev);
567 }
568 return res;
569}
570
571/**
572 * sas_discover_end_dev -- discover an end device (SSP, etc)
573 * @end: pointer to domain device of interest
574 *
575 * See comment in sas_discover_sata().
576 */
577int sas_discover_end_dev(struct domain_device *dev)
578{
579 int res;
580
581 res = sas_notify_lldd_dev_found(dev);
582 if (res)
583 return res;
584
585 res = sas_rphy_add(dev->rphy);
586 if (res)
587 goto out_err;
588
589 /* do this to get the end device port attributes which will have
590 * been scanned in sas_rphy_add */
591 sas_notify_lldd_dev_gone(dev);
592 sas_notify_lldd_dev_found(dev);
593
594 return 0;
595
596out_err:
597 sas_notify_lldd_dev_gone(dev);
598 return res;
599}
600
601/* ---------- Device registration and unregistration ---------- */
602
603static inline void sas_unregister_common_dev(struct domain_device *dev)
604{
605 sas_notify_lldd_dev_gone(dev);
606 if (!dev->parent)
607 dev->port->port_dev = NULL;
608 else
609 list_del_init(&dev->siblings);
610 list_del_init(&dev->dev_list_node);
611}
612
613void sas_unregister_dev(struct domain_device *dev)
614{
615 if (dev->rphy) {
616 sas_remove_children(&dev->rphy->dev);
617 sas_rphy_delete(dev->rphy);
618 dev->rphy = NULL;
619 }
620 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
621 /* remove the phys and ports, everything else should be gone */
622 kfree(dev->ex_dev.ex_phy);
623 dev->ex_dev.ex_phy = NULL;
624 }
625 sas_unregister_common_dev(dev);
626}
627
628void sas_unregister_domain_devices(struct asd_sas_port *port)
629{
630 struct domain_device *dev, *n;
631
632	list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node)
633 sas_unregister_dev(dev);
634
635 port->port->rphy = NULL;
636
637}
638
639/* ---------- Discovery and Revalidation ---------- */
640
641/**
642 * sas_discover_domain -- discover the domain
643 * @port: port to the domain of interest
644 *
645 * NOTE: this process _must_ quit (return) as soon as any connection
646 * errors are encountered. Connection recovery is done elsewhere.
647 * Discover process only interrogates devices in order to discover the
648 * domain.
649 */
650static void sas_discover_domain(void *data)
651{
652 int error = 0;
653 struct asd_sas_port *port = data;
654
655 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
656 &port->disc.pending);
657
658 if (port->port_dev)
659		return;
660 else {
661 error = sas_get_port_device(port);
662 if (error)
663 return;
664 }
665
666 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
667 current->pid);
668
669 switch (port->port_dev->dev_type) {
670 case SAS_END_DEV:
671 error = sas_discover_end_dev(port->port_dev);
672 break;
673 case EDGE_DEV:
674 case FANOUT_DEV:
675 error = sas_discover_root_expander(port->port_dev);
676 break;
677 case SATA_DEV:
678 case SATA_PM:
679 error = sas_discover_sata(port->port_dev);
680 break;
681 default:
682 SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type);
683 break;
684 }
685
686 if (error) {
687 kfree(port->port_dev); /* not kobject_register-ed yet */
688 port->port_dev = NULL;
689 }
690
691 SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
692 current->pid, error);
693}
694
695static void sas_revalidate_domain(void *data)
696{
697 int res = 0;
698 struct asd_sas_port *port = data;
699
700 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
701 &port->disc.pending);
702
703 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
704 current->pid);
705 if (port->port_dev)
706 res = sas_ex_revalidate_domain(port->port_dev);
707
708 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
709 port->id, current->pid, res);
710}
711
712/* ---------- Events ---------- */
713
714int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
715{
716 struct sas_discovery *disc;
717
718 if (!port)
719 return 0;
720 disc = &port->disc;
721
722 BUG_ON(ev >= DISC_NUM_EVENTS);
723
724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
725 &disc->disc_work[ev], port->ha->core.shost);
726
727 return 0;
728}
729
730/**
731 * sas_init_disc -- initialize the discovery struct in the port
732 * @port: pointer to struct port
733 *
734 * Called when the ports are being initialized.
735 */
736void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
737{
738 int i;
739
740 static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
743 };
744
745 spin_lock_init(&disc->disc_event_lock);
746 disc->pending = 0;
747 for (i = 0; i < DISC_NUM_EVENTS; i++)
748 INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
749}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
new file mode 100644
index 000000000000..f1246d2c9bef
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -0,0 +1,76 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_dump.h"
26
27#ifdef SAS_DEBUG
28
29static const char *sas_hae_str[] = {
30 [0] = "HAE_RESET",
31};
32
33static const char *sas_porte_str[] = {
34 [0] = "PORTE_BYTES_DMAED",
35 [1] = "PORTE_BROADCAST_RCVD",
36 [2] = "PORTE_LINK_RESET_ERR",
37 [3] = "PORTE_TIMER_EVENT",
38 [4] = "PORTE_HARD_RESET",
39};
40
41static const char *sas_phye_str[] = {
42 [0] = "PHYE_LOSS_OF_SIGNAL",
43 [1] = "PHYE_OOB_DONE",
44 [2] = "PHYE_OOB_ERROR",
45 [3] = "PHYE_SPINUP_HOLD",
46};
47
48void sas_dprint_porte(int phyid, enum port_event pe)
49{
50 SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
51}
52void sas_dprint_phye(int phyid, enum phy_event pe)
53{
54 SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
55}
56
57void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
58{
59 SAS_DPRINTK("ha %s: %s event\n", pci_name(sas_ha->pcidev),
60 sas_hae_str[he]);
61}
62
63void sas_dump_port(struct asd_sas_port *port)
64{
65 SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
66 SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
67 SAS_ADDR(port->sas_addr));
68 SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
69 SAS_ADDR(port->attached_sas_addr));
70 SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
71 SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
72 SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
73 SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
74}
75
76#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
new file mode 100644
index 000000000000..47b45d4f5258
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -0,0 +1,42 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27#ifdef SAS_DEBUG
28
29void sas_dprint_porte(int phyid, enum port_event pe);
30void sas_dprint_phye(int phyid, enum phy_event pe);
31void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
32void sas_dump_port(struct asd_sas_port *port);
33
34#else /* SAS_DEBUG */
35
36static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
37static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
38static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
39 enum ha_event he) { }
40static inline void sas_dump_port(struct asd_sas_port *port) { }
41
42#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
new file mode 100644
index 000000000000..19110ed1c89c
--- /dev/null
+++ b/drivers/scsi/libsas/sas_event.c
@@ -0,0 +1,75 @@
1/*
2 * Serial Attached SCSI (SAS) Event processing
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <scsi/scsi_host.h>
26#include "sas_internal.h"
27#include "sas_dump.h"
28
29static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
30{
31 BUG_ON(event >= HA_NUM_EVENTS);
32
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34 &sas_ha->ha_events[event], sas_ha->core.shost);
35}
36
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
38{
39 struct sas_ha_struct *ha = phy->ha;
40
41 BUG_ON(event >= PORT_NUM_EVENTS);
42
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44 &phy->port_events[event], ha->core.shost);
45}
46
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
48{
49 struct sas_ha_struct *ha = phy->ha;
50
51 BUG_ON(event >= PHY_NUM_EVENTS);
52
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54 &phy->phy_events[event], ha->core.shost);
55}
56
57int sas_init_events(struct sas_ha_struct *sas_ha)
58{
59 static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
60 [HAE_RESET] = sas_hae_reset,
61 };
62
63 int i;
64
65 spin_lock_init(&sas_ha->event_lock);
66
67 for (i = 0; i < HA_NUM_EVENTS; i++)
68 INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
69
70 sas_ha->notify_ha_event = notify_ha_event;
71 sas_ha->notify_port_event = notify_port_event;
72 sas_ha->notify_phy_event = notify_phy_event;
73
74 return 0;
75}
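[Note: sas_init_events publishes the notify_* callbacks through sas_ha, so an LLDD's interrupt path posts events by indirect call and never touches the workqueue bookkeeping. A userspace sketch of that indirection with trimmed stand-in types; the callback signature is modeled on the code above, not copied from a header.]

#include <stdio.h>

enum port_event { PORTE_BYTES_DMAED, PORTE_BROADCAST_RCVD };

struct asd_sas_phy;
struct sas_ha {
	void (*notify_port_event)(struct asd_sas_phy *, enum port_event);
};

static void queue_port_event(struct asd_sas_phy *phy, enum port_event ev)
{
	printf("queued port event %d\n", ev);	/* models sas_queue_event() */
}

static void sas_init_events(struct sas_ha *ha)
{
	ha->notify_port_event = queue_port_event;
}

int main(void)
{
	struct sas_ha ha;

	sas_init_events(&ha);
	/* what an LLDD ISR would do on an identify-frame interrupt: */
	ha.notify_port_event(NULL, PORTE_BYTES_DMAED);
	return 0;
}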
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
new file mode 100644
index 000000000000..30b8014bcc7a
--- /dev/null
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -0,0 +1,1855 @@
1/*
2 * Serial Attached SCSI (SAS) Expander discovery and configuration
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <linux/pci.h>
26#include <linux/scatterlist.h>
27
28#include "sas_internal.h"
29
30#include <scsi/scsi_transport.h>
31#include <scsi/scsi_transport_sas.h>
32#include "../scsi_sas_internal.h"
33
34static int sas_discover_expander(struct domain_device *dev);
35static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
36static int sas_configure_phy(struct domain_device *dev, int phy_id,
37 u8 *sas_addr, int include);
38static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
39
40#if 0
41/* FIXME: smp needs to migrate into the sas class */
42static ssize_t smp_portal_read(struct kobject *, char *, loff_t, size_t);
43static ssize_t smp_portal_write(struct kobject *, char *, loff_t, size_t);
44#endif
45
46/* ---------- SMP task management ---------- */
47
48static void smp_task_timedout(unsigned long _task)
49{
50 struct sas_task *task = (void *) _task;
51 unsigned long flags;
52
53 spin_lock_irqsave(&task->task_state_lock, flags);
54 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
55 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
56 spin_unlock_irqrestore(&task->task_state_lock, flags);
57
58 complete(&task->completion);
59}
60
61static void smp_task_done(struct sas_task *task)
62{
63 if (!del_timer(&task->timer))
64 return;
65 complete(&task->completion);
66}
67
68/* Give it a generous timeout, in seconds. */
69#define SMP_TIMEOUT 10
70
71static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
72 void *resp, int resp_size)
73{
74 int res;
75 struct sas_task *task = sas_alloc_task(GFP_KERNEL);
76 struct sas_internal *i =
77 to_sas_internal(dev->port->ha->core.shost->transportt);
78
79 if (!task)
80 return -ENOMEM;
81
82 task->dev = dev;
83 task->task_proto = dev->tproto;
84 sg_init_one(&task->smp_task.smp_req, req, req_size);
85 sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
86
87 task->task_done = smp_task_done;
88
89 task->timer.data = (unsigned long) task;
90 task->timer.function = smp_task_timedout;
91 task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
92 add_timer(&task->timer);
93
94 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
95
96 if (res) {
97 del_timer(&task->timer);
98 SAS_DPRINTK("executing SMP task failed:%d\n", res);
99 goto ex_err;
100 }
101
102 wait_for_completion(&task->completion);
103 res = -ETASK;
104 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
105 SAS_DPRINTK("smp task timed out or aborted\n");
106 i->dft->lldd_abort_task(task);
107 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
108 SAS_DPRINTK("SMP task aborted and not done\n");
109 goto ex_err;
110 }
111 }
112 if (task->task_status.resp == SAS_TASK_COMPLETE &&
113 task->task_status.stat == SAM_GOOD)
114 res = 0;
115 else
116 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
117 "status 0x%x\n", __FUNCTION__,
118 SAS_ADDR(dev->sas_addr),
119 task->task_status.resp,
120 task->task_status.stat);
121ex_err:
122 sas_free_task(task);
123 return res;
124}
125
126/* ---------- Allocations ---------- */
127
128static inline void *alloc_smp_req(int size)
129{
130 u8 *p = kzalloc(size, GFP_KERNEL);
131 if (p)
132 p[0] = SMP_REQUEST;
133 return p;
134}
135
136static inline void *alloc_smp_resp(int size)
137{
138 return kzalloc(size, GFP_KERNEL);
139}
140
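[Note: every SMP request framed by these helpers starts with frame type SMP_REQUEST (0x40) in byte 0; callers then fill in the function code (byte 1) and its parameters, e.g. sas_ex_phy_discover() below puts the phy id of a DISCOVER request in byte 9. A userspace sketch of that framing; the constants mirror the values used in this file.]

#include <stdio.h>
#include <stdlib.h>

#define SMP_REQUEST	0x40
#define SMP_DISCOVER	0x10

static unsigned char *alloc_smp_req(int size)
{
	unsigned char *p = calloc(1, size);
	if (p)
		p[0] = SMP_REQUEST;	/* frame type, as in the helper above */
	return p;
}

int main(void)
{
	unsigned char *req = alloc_smp_req(16);

	if (!req)
		return 1;
	req[1] = SMP_DISCOVER;		/* function code */
	req[9] = 3;			/* DISCOVER: phy to interrogate */
	printf("%02x %02x phy %d\n", req[0], req[1], req[9]);
	free(req);
	return 0;
}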
141/* ---------- Expander configuration ---------- */
142
143static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
144 void *disc_resp)
145{
146 struct expander_device *ex = &dev->ex_dev;
147 struct ex_phy *phy = &ex->ex_phy[phy_id];
148 struct smp_resp *resp = disc_resp;
149 struct discover_resp *dr = &resp->disc;
150 struct sas_rphy *rphy = dev->rphy;
151 int rediscover = (phy->phy != NULL);
152
153 if (!rediscover) {
154 phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
155
156 /* FIXME: error_handling */
157 BUG_ON(!phy->phy);
158 }
159
160 switch (resp->result) {
161 case SMP_RESP_PHY_VACANT:
162 phy->phy_state = PHY_VACANT;
163 return;
164 default:
165 phy->phy_state = PHY_NOT_PRESENT;
166 return;
167 case SMP_RESP_FUNC_ACC:
168 phy->phy_state = PHY_EMPTY; /* do not know yet */
169 break;
170 }
171
172 phy->phy_id = phy_id;
173 phy->attached_dev_type = dr->attached_dev_type;
174 phy->linkrate = dr->linkrate;
175 phy->attached_sata_host = dr->attached_sata_host;
176 phy->attached_sata_dev = dr->attached_sata_dev;
177 phy->attached_sata_ps = dr->attached_sata_ps;
178 phy->attached_iproto = dr->iproto << 1;
179 phy->attached_tproto = dr->tproto << 1;
180 memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
181 phy->attached_phy_id = dr->attached_phy_id;
182 phy->phy_change_count = dr->change_count;
183 phy->routing_attr = dr->routing_attr;
184 phy->virtual = dr->virtual;
185 phy->last_da_index = -1;
186
187 phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
188 phy->phy->identify.target_port_protocols = phy->attached_tproto;
189 phy->phy->identify.phy_identifier = phy_id;
190 phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
191 phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
192 phy->phy->minimum_linkrate = dr->pmin_linkrate;
193 phy->phy->maximum_linkrate = dr->pmax_linkrate;
194 phy->phy->negotiated_linkrate = phy->linkrate;
195
196 if (!rediscover)
197 sas_phy_add(phy->phy);
198
199 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
200 SAS_ADDR(dev->sas_addr), phy->phy_id,
201 phy->routing_attr == TABLE_ROUTING ? 'T' :
202 phy->routing_attr == DIRECT_ROUTING ? 'D' :
203 phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?',
204 SAS_ADDR(phy->attached_sas_addr));
205
206 return;
207}
208
209#define DISCOVER_REQ_SIZE 16
210#define DISCOVER_RESP_SIZE 56
211
212static int sas_ex_phy_discover(struct domain_device *dev, int single)
213{
214 struct expander_device *ex = &dev->ex_dev;
215 int res = 0;
216 u8 *disc_req;
217 u8 *disc_resp;
218
219 disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
220 if (!disc_req)
221 return -ENOMEM;
222
223 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
224 if (!disc_resp) {
225 kfree(disc_req);
226 return -ENOMEM;
227 }
228
229 disc_req[1] = SMP_DISCOVER;
230
231 if (0 <= single && single < ex->num_phys) {
232 disc_req[9] = single;
233 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
234 disc_resp, DISCOVER_RESP_SIZE);
235 if (res)
236 goto out_err;
237 sas_set_ex_phy(dev, single, disc_resp);
238 } else {
239 int i;
240
241 for (i = 0; i < ex->num_phys; i++) {
242 disc_req[9] = i;
243 res = smp_execute_task(dev, disc_req,
244 DISCOVER_REQ_SIZE, disc_resp,
245 DISCOVER_RESP_SIZE);
246 if (res)
247 goto out_err;
248 sas_set_ex_phy(dev, i, disc_resp);
249 }
250 }
251out_err:
252 kfree(disc_resp);
253 kfree(disc_req);
254 return res;
255}
256
257static int sas_expander_discover(struct domain_device *dev)
258{
259 struct expander_device *ex = &dev->ex_dev;
260 int res = -ENOMEM;
261
262 ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL);
263 if (!ex->ex_phy)
264 return -ENOMEM;
265
266 res = sas_ex_phy_discover(dev, -1);
267 if (res)
268 goto out_err;
269
270 return 0;
271 out_err:
272 kfree(ex->ex_phy);
273 ex->ex_phy = NULL;
274 return res;
275}
276
277#define MAX_EXPANDER_PHYS 128
278
279static void ex_assign_report_general(struct domain_device *dev,
280 struct smp_resp *resp)
281{
282 struct report_general_resp *rg = &resp->rg;
283
284 dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
285 dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
286 dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
287 dev->ex_dev.conf_route_table = rg->conf_route_table;
288 dev->ex_dev.configuring = rg->configuring;
289 memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
290}
291
292#define RG_REQ_SIZE 8
293#define RG_RESP_SIZE 32
294
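/*
 * Issue REPORT GENERAL and cache the result.  Retried up to five times
 * while the expander reports that it is still self-configuring.
 */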
295static int sas_ex_general(struct domain_device *dev)
296{
297 u8 *rg_req;
298 struct smp_resp *rg_resp;
299 int res;
300 int i;
301
302 rg_req = alloc_smp_req(RG_REQ_SIZE);
303 if (!rg_req)
304 return -ENOMEM;
305
306 rg_resp = alloc_smp_resp(RG_RESP_SIZE);
307 if (!rg_resp) {
308 kfree(rg_req);
309 return -ENOMEM;
310 }
311
312 rg_req[1] = SMP_REPORT_GENERAL;
313
314 for (i = 0; i < 5; i++) {
315 res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
316 RG_RESP_SIZE);
317
318 if (res) {
319 SAS_DPRINTK("RG to ex %016llx failed:0x%x\n",
320 SAS_ADDR(dev->sas_addr), res);
321 goto out;
322 } else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
323 SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n",
324 SAS_ADDR(dev->sas_addr), rg_resp->result);
325 res = rg_resp->result;
326 goto out;
327 }
328
329 ex_assign_report_general(dev, rg_resp);
330
331 if (dev->ex_dev.configuring) {
332 SAS_DPRINTK("RG: ex %016llx self-configuring...\n",
333 SAS_ADDR(dev->sas_addr));
334 schedule_timeout_interruptible(5*HZ);
335 } else
336 break;
337 }
338out:
339 kfree(rg_req);
340 kfree(rg_resp);
341 return res;
342}
343
344static void ex_assign_manuf_info(struct domain_device *dev, void
345 *_mi_resp)
346{
347 u8 *mi_resp = _mi_resp;
348 struct sas_rphy *rphy = dev->rphy;
349 struct sas_expander_device *edev = rphy_to_expander_device(rphy);
350
351 memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN);
352 memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN);
353 memcpy(edev->product_rev, mi_resp + 36,
354 SAS_EXPANDER_PRODUCT_REV_LEN);
355
356 if (mi_resp[8] & 1) {
357 memcpy(edev->component_vendor_id, mi_resp + 40,
358 SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
359 edev->component_id = mi_resp[48] << 8 | mi_resp[49];
360 edev->component_revision_id = mi_resp[50];
361 }
362}
363
364#define MI_REQ_SIZE 8
365#define MI_RESP_SIZE 64
366
367static int sas_ex_manuf_info(struct domain_device *dev)
368{
369 u8 *mi_req;
370 u8 *mi_resp;
371 int res;
372
373 mi_req = alloc_smp_req(MI_REQ_SIZE);
374 if (!mi_req)
375 return -ENOMEM;
376
377 mi_resp = alloc_smp_resp(MI_RESP_SIZE);
378 if (!mi_resp) {
379 kfree(mi_req);
380 return -ENOMEM;
381 }
382
383 mi_req[1] = SMP_REPORT_MANUF_INFO;
384
385 res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
386 if (res) {
387 SAS_DPRINTK("MI: ex %016llx failed:0x%x\n",
388 SAS_ADDR(dev->sas_addr), res);
389 goto out;
390 } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
391 SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n",
392 SAS_ADDR(dev->sas_addr), mi_resp[2]);
393 goto out;
394 }
395
396 ex_assign_manuf_info(dev, mi_resp);
397out:
398 kfree(mi_req);
399 kfree(mi_resp);
400 return res;
401}
402
403#define PC_REQ_SIZE 44
404#define PC_RESP_SIZE 8
405
406int sas_smp_phy_control(struct domain_device *dev, int phy_id,
407 enum phy_func phy_func,
408 struct sas_phy_linkrates *rates)
409{
410 u8 *pc_req;
411 u8 *pc_resp;
412 int res;
413
414 pc_req = alloc_smp_req(PC_REQ_SIZE);
415 if (!pc_req)
416 return -ENOMEM;
417
418 pc_resp = alloc_smp_resp(PC_RESP_SIZE);
419 if (!pc_resp) {
420 kfree(pc_req);
421 return -ENOMEM;
422 }
423
424 pc_req[1] = SMP_PHY_CONTROL;
425 pc_req[9] = phy_id;
426 pc_req[10] = phy_func;
427 if (rates) {
428 pc_req[32] = rates->minimum_linkrate << 4;
429 pc_req[33] = rates->maximum_linkrate << 4;
430 }
431
432 res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
433
434 kfree(pc_resp);
435 kfree(pc_req);
436 return res;
437}
438
439static void sas_ex_disable_phy(struct domain_device *dev, int phy_id)
440{
441 struct expander_device *ex = &dev->ex_dev;
442 struct ex_phy *phy = &ex->ex_phy[phy_id];
443
444 sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL);
445 phy->linkrate = SAS_PHY_DISABLED;
446}
447
448static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr)
449{
450 struct expander_device *ex = &dev->ex_dev;
451 int i;
452
453 for (i = 0; i < ex->num_phys; i++) {
454 struct ex_phy *phy = &ex->ex_phy[i];
455
456 if (phy->phy_state == PHY_VACANT ||
457 phy->phy_state == PHY_NOT_PRESENT)
458 continue;
459
460 if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr))
461 sas_ex_disable_phy(dev, i);
462 }
463}
464
465static int sas_dev_present_in_domain(struct asd_sas_port *port,
466 u8 *sas_addr)
467{
468 struct domain_device *dev;
469
470 if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr))
471 return 1;
472 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
473 if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr))
474 return 1;
475 }
476 return 0;
477}
478
479#define RPEL_REQ_SIZE 16
480#define RPEL_RESP_SIZE 32
481int sas_smp_get_phy_events(struct sas_phy *phy)
482{
483 int res = -ENOMEM;
484 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
485 struct domain_device *dev = sas_find_dev_by_rphy(rphy);
486 u8 *req = alloc_smp_req(RPEL_REQ_SIZE);
487 u8 *resp = kzalloc(RPEL_RESP_SIZE, GFP_KERNEL);
488
489 if (!req || !resp)
490 goto out;
491
492 req[1] = SMP_REPORT_PHY_ERR_LOG;
493 req[9] = phy->number;
494
495 res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
496 resp, RPEL_RESP_SIZE);
497
498 if (res)
499 goto out;
500
501 phy->invalid_dword_count = scsi_to_u32(&resp[12]);
502 phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
503 phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
504 phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
505
506 out:
507 kfree(req);
508 kfree(resp);
509 return res;
510}
511
512#define RPS_REQ_SIZE 16
513#define RPS_RESP_SIZE 60
514
515static int sas_get_report_phy_sata(struct domain_device *dev,
516 int phy_id,
517 struct smp_resp *rps_resp)
518{
519 int res;
520 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
521
522 if (!rps_req)
523 return -ENOMEM;
524
525 rps_req[1] = SMP_REPORT_PHY_SATA;
526 rps_req[9] = phy_id;
527
528 res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
529 rps_resp, RPS_RESP_SIZE);
530
531 kfree(rps_req);
532 return res;
533}
534
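/*
 * Collect every parent phy attached to @child into the child's port
 * (forming a wide port), and derive the child's negotiated link rate
 * and pathway count from them.
 */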
535static void sas_ex_get_linkrate(struct domain_device *parent,
536 struct domain_device *child,
537 struct ex_phy *parent_phy)
538{
539 struct expander_device *parent_ex = &parent->ex_dev;
540 struct sas_port *port;
541 int i;
542
543 child->pathways = 0;
544
545 port = parent_phy->port;
546
547 for (i = 0; i < parent_ex->num_phys; i++) {
548 struct ex_phy *phy = &parent_ex->ex_phy[i];
549
550 if (phy->phy_state == PHY_VACANT ||
551 phy->phy_state == PHY_NOT_PRESENT)
552 continue;
553
554 if (SAS_ADDR(phy->attached_sas_addr) ==
555 SAS_ADDR(child->sas_addr)) {
556
557 child->min_linkrate = min(parent->min_linkrate,
558 phy->linkrate);
559 child->max_linkrate = max(parent->max_linkrate,
560 phy->linkrate);
561 child->pathways++;
562 sas_port_add_phy(port, phy->phy);
563 }
564 }
565 child->linkrate = min(parent_phy->linkrate, child->max_linkrate);
566 child->pathways = min(child->pathways, parent->pathways);
567}
568
569static struct domain_device *sas_ex_discover_end_dev(
570 struct domain_device *parent, int phy_id)
571{
572 struct expander_device *parent_ex = &parent->ex_dev;
573 struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
574 struct domain_device *child = NULL;
575 struct sas_rphy *rphy;
576 int res;
577
578 if (phy->attached_sata_host || phy->attached_sata_ps)
579 return NULL;
580
581 child = kzalloc(sizeof(*child), GFP_KERNEL);
582 if (!child)
583 return NULL;
584
585 child->parent = parent;
586 child->port = parent->port;
587 child->iproto = phy->attached_iproto;
588 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
589 sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
590 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
591 BUG_ON(!phy->port);
592 /* FIXME: better error handling*/
593 BUG_ON(sas_port_add(phy->port) != 0);
594 sas_ex_get_linkrate(parent, child, phy);
595
596 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
597 child->dev_type = SATA_DEV;
598 if (phy->attached_tproto & SAS_PROTO_STP)
599 child->tproto = phy->attached_tproto;
600 if (phy->attached_sata_dev)
601 child->tproto |= SATA_DEV;
602 res = sas_get_report_phy_sata(parent, phy_id,
603 &child->sata_dev.rps_resp);
604 if (res) {
605 SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
606 "0x%x\n", SAS_ADDR(parent->sas_addr),
607 phy_id, res);
608 kfree(child);
609 return NULL;
610 }
611 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
612 sizeof(struct dev_to_host_fis));
613 sas_init_dev(child);
614 res = sas_discover_sata(child);
615 if (res) {
616 SAS_DPRINTK("sas_discover_sata() for device %16llx at "
617 "%016llx:0x%x returned 0x%x\n",
618 SAS_ADDR(child->sas_addr),
619 SAS_ADDR(parent->sas_addr), phy_id, res);
620 kfree(child);
621 return NULL;
622 }
623 } else if (phy->attached_tproto & SAS_PROTO_SSP) {
624 child->dev_type = SAS_END_DEV;
625 rphy = sas_end_device_alloc(phy->port);
626 /* FIXME: error handling */
627 BUG_ON(!rphy);
628 child->tproto = phy->attached_tproto;
629 sas_init_dev(child);
630
631 child->rphy = rphy;
632 sas_fill_in_rphy(child, rphy);
633
634 spin_lock(&parent->port->dev_list_lock);
635 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
636 spin_unlock(&parent->port->dev_list_lock);
637
638 res = sas_discover_end_dev(child);
639 if (res) {
640 SAS_DPRINTK("sas_discover_end_dev() for device %16llx "
641 "at %016llx:0x%x returned 0x%x\n",
642 SAS_ADDR(child->sas_addr),
643 SAS_ADDR(parent->sas_addr), phy_id, res);
644 /* FIXME: this kfrees list elements without removing them */
645 //kfree(child);
646 return NULL;
647 }
648 } else {
649 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
650 phy->attached_tproto, SAS_ADDR(parent->sas_addr),
651 phy_id);
652 }
653
654 list_add_tail(&child->siblings, &parent_ex->children);
655 return child;
656}
657
658static struct domain_device *sas_ex_discover_expander(
659 struct domain_device *parent, int phy_id)
660{
661 struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy);
662 struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
663 struct domain_device *child = NULL;
664 struct sas_rphy *rphy;
665 struct sas_expander_device *edev;
666 struct asd_sas_port *port;
667 int res;
668
669 if (phy->routing_attr == DIRECT_ROUTING) {
670 SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not "
671 "allowed\n",
672 SAS_ADDR(parent->sas_addr), phy_id,
673 SAS_ADDR(phy->attached_sas_addr),
674 phy->attached_phy_id);
675 return NULL;
676 }
677 child = kzalloc(sizeof(*child), GFP_KERNEL);
678 if (!child)
679 return NULL;
680
681 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
682 /* FIXME: better error handling */
683 BUG_ON(sas_port_add(phy->port) != 0);
684
685
686 switch (phy->attached_dev_type) {
687 case EDGE_DEV:
688 rphy = sas_expander_alloc(phy->port,
689 SAS_EDGE_EXPANDER_DEVICE);
690 break;
691 case FANOUT_DEV:
692 rphy = sas_expander_alloc(phy->port,
693 SAS_FANOUT_EXPANDER_DEVICE);
694 break;
695 default:
696 rphy = NULL; /* shut gcc up */
697 BUG();
698 }
699 port = parent->port;
700 child->rphy = rphy;
701 edev = rphy_to_expander_device(rphy);
702 child->dev_type = phy->attached_dev_type;
703 child->parent = parent;
704 child->port = port;
705 child->iproto = phy->attached_iproto;
706 child->tproto = phy->attached_tproto;
707 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
708 sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
709 sas_ex_get_linkrate(parent, child, phy);
710 edev->level = parent_ex->level + 1;
711 parent->port->disc.max_level = max(parent->port->disc.max_level,
712 edev->level);
713 sas_init_dev(child);
714 sas_fill_in_rphy(child, rphy);
715 sas_rphy_add(rphy);
716
717 spin_lock(&parent->port->dev_list_lock);
718 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
719 spin_unlock(&parent->port->dev_list_lock);
720
721 res = sas_discover_expander(child);
722 if (res) {
723 kfree(child);
724 return NULL;
725 }
726 list_add_tail(&child->siblings, &parent->ex_dev.children);
727 return child;
728}
729
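/*
 * Discover whatever is attached to expander phy @phy_id: wake devices
 * held in SATA spinup, recognize phys looping back to the parent or
 * local port, program table routing, then attach an end device or
 * recurse into a child expander as appropriate.
 */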
730static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
731{
732 struct expander_device *ex = &dev->ex_dev;
733 struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
734 struct domain_device *child = NULL;
735 int res = 0;
736
737 /* Phy state */
738 if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) {
739 if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL))
740 res = sas_ex_phy_discover(dev, phy_id);
741 if (res)
742 return res;
743 }
744
745 /* Parent and domain coherency */
746 if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
747 SAS_ADDR(dev->port->sas_addr))) {
748 sas_add_parent_port(dev, phy_id);
749 return 0;
750 }
751 if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
752 SAS_ADDR(dev->parent->sas_addr))) {
753 sas_add_parent_port(dev, phy_id);
754 if (ex_phy->routing_attr == TABLE_ROUTING)
755 sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1);
756 return 0;
757 }
758
759 if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
760 sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
761
762 if (ex_phy->attached_dev_type == NO_DEVICE) {
763 if (ex_phy->routing_attr == DIRECT_ROUTING) {
764 memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
765 sas_configure_routing(dev, ex_phy->attached_sas_addr);
766 }
767 return 0;
768 } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
769 return 0;
770
771 if (ex_phy->attached_dev_type != SAS_END_DEV &&
772 ex_phy->attached_dev_type != FANOUT_DEV &&
773 ex_phy->attached_dev_type != EDGE_DEV) {
774 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
775 "phy 0x%x\n", ex_phy->attached_dev_type,
776 SAS_ADDR(dev->sas_addr),
777 phy_id);
778 return 0;
779 }
780
781 res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
782 if (res) {
783 SAS_DPRINTK("configure routing for dev %016llx "
784 "reported 0x%x. Forgotten\n",
785 SAS_ADDR(ex_phy->attached_sas_addr), res);
786 sas_disable_routing(dev, ex_phy->attached_sas_addr);
787 return res;
788 }
789
790 switch (ex_phy->attached_dev_type) {
791 case SAS_END_DEV:
792 child = sas_ex_discover_end_dev(dev, phy_id);
793 break;
794 case FANOUT_DEV:
795 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
796 SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
797 "attached to ex %016llx phy 0x%x\n",
798 SAS_ADDR(ex_phy->attached_sas_addr),
799 ex_phy->attached_phy_id,
800 SAS_ADDR(dev->sas_addr),
801 phy_id);
802 sas_ex_disable_phy(dev, phy_id);
803 break;
804 } else
805 memcpy(dev->port->disc.fanout_sas_addr,
806 ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
807 /* fallthrough */
808 case EDGE_DEV:
809 child = sas_ex_discover_expander(dev, phy_id);
810 break;
811 default:
812 break;
813 }
814
815 if (child) {
816 int i;
817
818 for (i = 0; i < ex->num_phys; i++) {
819 if (ex->ex_phy[i].phy_state == PHY_VACANT ||
820 ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
821 continue;
822
823 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
824 SAS_ADDR(child->sas_addr))
825 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
826 }
827 }
828
829 return res;
830}
831
832static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
833{
834 struct expander_device *ex = &dev->ex_dev;
835 int i;
836
837 for (i = 0; i < ex->num_phys; i++) {
838 struct ex_phy *phy = &ex->ex_phy[i];
839
840 if (phy->phy_state == PHY_VACANT ||
841 phy->phy_state == PHY_NOT_PRESENT)
842 continue;
843
844 if ((phy->attached_dev_type == EDGE_DEV ||
845 phy->attached_dev_type == FANOUT_DEV) &&
846 phy->routing_attr == SUBTRACTIVE_ROUTING) {
847
848 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
849
850 return 1;
851 }
852 }
853 return 0;
854}
855
856static int sas_check_level_subtractive_boundary(struct domain_device *dev)
857{
858 struct expander_device *ex = &dev->ex_dev;
859 struct domain_device *child;
860 u8 sub_addr[8] = {0, };
861
862 list_for_each_entry(child, &ex->children, siblings) {
863 if (child->dev_type != EDGE_DEV &&
864 child->dev_type != FANOUT_DEV)
865 continue;
866 if (sub_addr[0] == 0) {
867 sas_find_sub_addr(child, sub_addr);
868 continue;
869 } else {
870 u8 s2[8];
871
872 if (sas_find_sub_addr(child, s2) &&
873 (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
874
875 SAS_DPRINTK("ex %016llx->%016llx-?->%016llx "
876 "diverges from subtractive "
877 "boundary %016llx\n",
878 SAS_ADDR(dev->sas_addr),
879 SAS_ADDR(child->sas_addr),
880 SAS_ADDR(s2),
881 SAS_ADDR(sub_addr));
882
883 sas_ex_disable_port(child, s2);
884 }
885 }
886 }
887 return 0;
888}
889/**
890 * sas_ex_discover_devices -- discover devices attached to this expander
891 * @dev: pointer to the expander domain device
892 * @single: phy index to discover, or -1 to discover all phys
893 *
894 * Configure this expander for use with its devices and register the
895 * devices of this expander.
896 */
897static int sas_ex_discover_devices(struct domain_device *dev, int single)
898{
899 struct expander_device *ex = &dev->ex_dev;
900 int i = 0, end = ex->num_phys;
901 int res = 0;
902
903 if (0 <= single && single < end) {
904 i = single;
905 end = i+1;
906 }
907
908 for ( ; i < end; i++) {
909 struct ex_phy *ex_phy = &ex->ex_phy[i];
910
911 if (ex_phy->phy_state == PHY_VACANT ||
912 ex_phy->phy_state == PHY_NOT_PRESENT ||
913 ex_phy->phy_state == PHY_DEVICE_DISCOVERED)
914 continue;
915
916 switch (ex_phy->linkrate) {
917 case SAS_PHY_DISABLED:
918 case SAS_PHY_RESET_PROBLEM:
919 case SAS_SATA_PORT_SELECTOR:
920 continue;
921 default:
922 res = sas_ex_discover_dev(dev, i);
923 if (res)
924 break;
925 continue;
926 }
927 }
928
929 if (!res)
930 sas_check_level_subtractive_boundary(dev);
931
932 return res;
933}
934
935static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
936{
937 struct expander_device *ex = &dev->ex_dev;
938 int i;
939 u8 *sub_sas_addr = NULL;
940
941 if (dev->dev_type != EDGE_DEV)
942 return 0;
943
944 for (i = 0; i < ex->num_phys; i++) {
945 struct ex_phy *phy = &ex->ex_phy[i];
946
947 if (phy->phy_state == PHY_VACANT ||
948 phy->phy_state == PHY_NOT_PRESENT)
949 continue;
950
951 if ((phy->attached_dev_type == FANOUT_DEV ||
952 phy->attached_dev_type == EDGE_DEV) &&
953 phy->routing_attr == SUBTRACTIVE_ROUTING) {
954
955 if (!sub_sas_addr)
956 sub_sas_addr = &phy->attached_sas_addr[0];
957 else if (SAS_ADDR(sub_sas_addr) !=
958 SAS_ADDR(phy->attached_sas_addr)) {
959
960 SAS_DPRINTK("ex %016llx phy 0x%x "
961 "diverges(%016llx) on subtractive "
962 "boundary(%016llx). Disabled\n",
963 SAS_ADDR(dev->sas_addr), i,
964 SAS_ADDR(phy->attached_sas_addr),
965 SAS_ADDR(sub_sas_addr));
966 sas_ex_disable_phy(dev, i);
967 }
968 }
969 }
970 return 0;
971}
972
973static void sas_print_parent_topology_bug(struct domain_device *child,
974 struct ex_phy *parent_phy,
975 struct ex_phy *child_phy)
976{
977 static const char ra_char[] = {
978 [DIRECT_ROUTING] = 'D',
979 [SUBTRACTIVE_ROUTING] = 'S',
980 [TABLE_ROUTING] = 'T',
981 };
982 static const char *ex_type[] = {
983 [EDGE_DEV] = "edge",
984 [FANOUT_DEV] = "fanout",
985 };
986 struct domain_device *parent = child->parent;
987
988 sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x "
989 "has %c:%c routing link!\n",
990
991 ex_type[parent->dev_type],
992 SAS_ADDR(parent->sas_addr),
993 parent_phy->phy_id,
994
995 ex_type[child->dev_type],
996 SAS_ADDR(child->sas_addr),
997 child_phy->phy_id,
998
999 ra_char[parent_phy->routing_attr],
1000 ra_char[child_phy->routing_attr]);
1001}
1002
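/*
 * Two edge expanders joined subtractive-to-subtractive form an edge
 * expander device set (EEDS).  Only one such pairing may exist in a
 * domain, and none at all once a fanout expander is present; record
 * the first pair seen and reject any link that violates this.
 */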
1003static int sas_check_eeds(struct domain_device *child,
1004 struct ex_phy *parent_phy,
1005 struct ex_phy *child_phy)
1006{
1007 int res = 0;
1008 struct domain_device *parent = child->parent;
1009
1010 if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
1011 res = -ENODEV;
1012 SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx "
1013 "phy S:0x%x, while there is a fanout ex %016llx\n",
1014 SAS_ADDR(parent->sas_addr),
1015 parent_phy->phy_id,
1016 SAS_ADDR(child->sas_addr),
1017 child_phy->phy_id,
1018 SAS_ADDR(parent->port->disc.fanout_sas_addr));
1019 } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
1020 memcpy(parent->port->disc.eeds_a, parent->sas_addr,
1021 SAS_ADDR_SIZE);
1022 memcpy(parent->port->disc.eeds_b, child->sas_addr,
1023 SAS_ADDR_SIZE);
1024 } else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
1025 SAS_ADDR(parent->sas_addr)) ||
1026 (SAS_ADDR(parent->port->disc.eeds_a) ==
1027 SAS_ADDR(child->sas_addr)))
1028 &&
1029 ((SAS_ADDR(parent->port->disc.eeds_b) ==
1030 SAS_ADDR(parent->sas_addr)) ||
1031 (SAS_ADDR(parent->port->disc.eeds_b) ==
1032 SAS_ADDR(child->sas_addr))))
1033 ;
1034 else {
1035 res = -ENODEV;
1036 SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx "
1037 "phy 0x%x link forms a third EEDS!\n",
1038 SAS_ADDR(parent->sas_addr),
1039 parent_phy->phy_id,
1040 SAS_ADDR(child->sas_addr),
1041 child_phy->phy_id);
1042 }
1043
1044 return res;
1045}
1046
1047/* Here we spill over 80 columns. It is intentional.
1048 */
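/*
 * Legal routing attributes on an inter-expander link, as checked below:
 * edge <-> fanout must be S:T, edge <-> edge must be S:S (an EEDS) or
 * S:T/T:S, and a fanout parent must present T against the child's S.
 */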
1049static int sas_check_parent_topology(struct domain_device *child)
1050{
1051 struct expander_device *child_ex = &child->ex_dev;
1052 struct expander_device *parent_ex;
1053 int i;
1054 int res = 0;
1055
1056 if (!child->parent)
1057 return 0;
1058
1059 if (child->parent->dev_type != EDGE_DEV &&
1060 child->parent->dev_type != FANOUT_DEV)
1061 return 0;
1062
1063 parent_ex = &child->parent->ex_dev;
1064
1065 for (i = 0; i < parent_ex->num_phys; i++) {
1066 struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
1067 struct ex_phy *child_phy;
1068
1069 if (parent_phy->phy_state == PHY_VACANT ||
1070 parent_phy->phy_state == PHY_NOT_PRESENT)
1071 continue;
1072
1073 if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr))
1074 continue;
1075
1076 child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
1077
1078 switch (child->parent->dev_type) {
1079 case EDGE_DEV:
1080 if (child->dev_type == FANOUT_DEV) {
1081 if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
1082 child_phy->routing_attr != TABLE_ROUTING) {
1083 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1084 res = -ENODEV;
1085 }
1086 } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
1087 if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
1088 res = sas_check_eeds(child, parent_phy, child_phy);
1089 } else if (child_phy->routing_attr != TABLE_ROUTING) {
1090 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1091 res = -ENODEV;
1092 }
1093 } else if (parent_phy->routing_attr == TABLE_ROUTING &&
1094 child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
1095 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1096 res = -ENODEV;
1097 }
1098 break;
1099 case FANOUT_DEV:
1100 if (parent_phy->routing_attr != TABLE_ROUTING ||
1101 child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
1102 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1103 res = -ENODEV;
1104 }
1105 break;
1106 default:
1107 break;
1108 }
1109 }
1110
1111 return res;
1112}
1113
1114#define RRI_REQ_SIZE 16
1115#define RRI_RESP_SIZE 44
1116
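/*
 * Scan the route table of phy @phy_id with REPORT ROUTE INFORMATION.
 * On success, *@present tells whether an enabled entry for @sas_addr
 * exists, and *@index is that entry's slot or the first free slot.
 */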
1117static int sas_configure_present(struct domain_device *dev, int phy_id,
1118 u8 *sas_addr, int *index, int *present)
1119{
1120 int i, res = 0;
1121 struct expander_device *ex = &dev->ex_dev;
1122 struct ex_phy *phy = &ex->ex_phy[phy_id];
1123 u8 *rri_req;
1124 u8 *rri_resp;
1125
1126 *present = 0;
1127 *index = 0;
1128
1129 rri_req = alloc_smp_req(RRI_REQ_SIZE);
1130 if (!rri_req)
1131 return -ENOMEM;
1132
1133 rri_resp = alloc_smp_resp(RRI_RESP_SIZE);
1134 if (!rri_resp) {
1135 kfree(rri_req);
1136 return -ENOMEM;
1137 }
1138
1139 rri_req[1] = SMP_REPORT_ROUTE_INFO;
1140 rri_req[9] = phy_id;
1141
1142 for (i = 0; i < ex->max_route_indexes ; i++) {
1143 *(__be16 *)(rri_req+6) = cpu_to_be16(i);
1144 res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp,
1145 RRI_RESP_SIZE);
1146 if (res)
1147 goto out;
1148 res = rri_resp[2];
1149 if (res == SMP_RESP_NO_INDEX) {
1150 SAS_DPRINTK("overflow of indexes: dev %016llx "
1151 "phy 0x%x index 0x%x\n",
1152 SAS_ADDR(dev->sas_addr), phy_id, i);
1153 goto out;
1154 } else if (res != SMP_RESP_FUNC_ACC) {
1155 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
1156 "result 0x%x\n", __FUNCTION__,
1157 SAS_ADDR(dev->sas_addr), phy_id, i, res);
1158 goto out;
1159 }
1160 if (SAS_ADDR(sas_addr) != 0) {
1161 if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) {
1162 *index = i;
1163 if ((rri_resp[12] & 0x80) == 0x80)
1164 *present = 0;
1165 else
1166 *present = 1;
1167 goto out;
1168 } else if (SAS_ADDR(rri_resp+16) == 0) {
1169 *index = i;
1170 *present = 0;
1171 goto out;
1172 }
1173 } else if (SAS_ADDR(rri_resp+16) == 0 &&
1174 phy->last_da_index < i) {
1175 phy->last_da_index = i;
1176 *index = i;
1177 *present = 0;
1178 goto out;
1179 }
1180 }
1181 res = -1;
1182out:
1183 kfree(rri_req);
1184 kfree(rri_resp);
1185 return res;
1186}
1187
1188#define CRI_REQ_SIZE 44
1189#define CRI_RESP_SIZE 8
1190
1191static int sas_configure_set(struct domain_device *dev, int phy_id,
1192 u8 *sas_addr, int index, int include)
1193{
1194 int res;
1195 u8 *cri_req;
1196 u8 *cri_resp;
1197
1198 cri_req = alloc_smp_req(CRI_REQ_SIZE);
1199 if (!cri_req)
1200 return -ENOMEM;
1201
1202 cri_resp = alloc_smp_resp(CRI_RESP_SIZE);
1203 if (!cri_resp) {
1204 kfree(cri_req);
1205 return -ENOMEM;
1206 }
1207
1208 cri_req[1] = SMP_CONF_ROUTE_INFO;
1209 *(__be16 *)(cri_req+6) = cpu_to_be16(index);
1210 cri_req[9] = phy_id;
1211 if (SAS_ADDR(sas_addr) == 0 || !include)
1212 cri_req[12] |= 0x80;
1213 memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE);
1214
1215 res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp,
1216 CRI_RESP_SIZE);
1217 if (res)
1218 goto out;
1219 res = cri_resp[2];
1220 if (res == SMP_RESP_NO_INDEX) {
1221 SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x "
1222 "index 0x%x\n",
1223 SAS_ADDR(dev->sas_addr), phy_id, index);
1224 }
1225out:
1226 kfree(cri_req);
1227 kfree(cri_resp);
1228 return res;
1229}
1230
1231static int sas_configure_phy(struct domain_device *dev, int phy_id,
1232 u8 *sas_addr, int include)
1233{
1234 int index;
1235 int present;
1236 int res;
1237
1238 res = sas_configure_present(dev, phy_id, sas_addr, &index, &present);
1239 if (res)
1240 return res;
1241 if (include ^ present)
1242 return sas_configure_set(dev, phy_id, sas_addr, index,include);
1243
1244 return res;
1245}
1246
1247/**
1248 * sas_configure_parent -- configure routing table of parent
1249 * @parent: parent expander
1250 * @child: child expander
1251 * @sas_addr: SAS port identifier of device directly attached to child
 * @include: whether the route entry is enabled (1) or disabled (0)
1252 */
1253static int sas_configure_parent(struct domain_device *parent,
1254 struct domain_device *child,
1255 u8 *sas_addr, int include)
1256{
1257 struct expander_device *ex_parent = &parent->ex_dev;
1258 int res = 0;
1259 int i;
1260
1261 if (parent->parent) {
1262 res = sas_configure_parent(parent->parent, parent, sas_addr,
1263 include);
1264 if (res)
1265 return res;
1266 }
1267
1268 if (ex_parent->conf_route_table == 0) {
1269 SAS_DPRINTK("ex %016llx has self-configuring routing table\n",
1270 SAS_ADDR(parent->sas_addr));
1271 return 0;
1272 }
1273
1274 for (i = 0; i < ex_parent->num_phys; i++) {
1275 struct ex_phy *phy = &ex_parent->ex_phy[i];
1276
1277 if ((phy->routing_attr == TABLE_ROUTING) &&
1278 (SAS_ADDR(phy->attached_sas_addr) ==
1279 SAS_ADDR(child->sas_addr))) {
1280 res = sas_configure_phy(parent, i, sas_addr, include);
1281 if (res)
1282 return res;
1283 }
1284 }
1285
1286 return res;
1287}
1288
1289/**
1290 * sas_configure_routing -- configure routing
1291 * @dev: expander device
1292 * @sas_addr: port identifier of device directly attached to the expander device
1293 */
1294static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr)
1295{
1296 if (dev->parent)
1297 return sas_configure_parent(dev->parent, dev, sas_addr, 1);
1298 return 0;
1299}
1300
1301static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
1302{
1303 if (dev->parent)
1304 return sas_configure_parent(dev->parent, dev, sas_addr, 0);
1305 return 0;
1306}
1307
1308#if 0
1309#define SMP_BIN_ATTR_NAME "smp_portal"
1310
1311static void sas_ex_smp_hook(struct domain_device *dev)
1312{
1313 struct expander_device *ex_dev = &dev->ex_dev;
1314 struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr;
1315
1316 memset(bin_attr, 0, sizeof(*bin_attr));
1317
1318 bin_attr->attr.name = SMP_BIN_ATTR_NAME;
1319 bin_attr->attr.owner = THIS_MODULE;
1320 bin_attr->attr.mode = 0600;
1321
1322 bin_attr->size = 0;
1323 bin_attr->private = NULL;
1324 bin_attr->read = smp_portal_read;
1325 bin_attr->write = smp_portal_write;
1326 bin_attr->mmap = NULL;
1327
1328 ex_dev->smp_portal_pid = -1;
1329 init_MUTEX(&ex_dev->smp_sema);
1330}
1331#endif
1332
1333/**
1334 * sas_discover_expander -- expander discovery
1335 * @dev: pointer to the expander domain device
1336 *
1337 * See comment in sas_discover_sata().
1338 */
1339static int sas_discover_expander(struct domain_device *dev)
1340{
1341 int res;
1342
1343 res = sas_notify_lldd_dev_found(dev);
1344 if (res)
1345 return res;
1346
1347 res = sas_ex_general(dev);
1348 if (res)
1349 goto out_err;
1350 res = sas_ex_manuf_info(dev);
1351 if (res)
1352 goto out_err;
1353
1354 res = sas_expander_discover(dev);
1355 if (res) {
1356 SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n",
1357 SAS_ADDR(dev->sas_addr), res);
1358 goto out_err;
1359 }
1360
1361 sas_check_ex_subtractive_boundary(dev);
1362 res = sas_check_parent_topology(dev);
1363 if (res)
1364 goto out_err;
1365 return 0;
1366out_err:
1367 sas_notify_lldd_dev_gone(dev);
1368 return res;
1369}
1370
1371static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
1372{
1373 int res = 0;
1374 struct domain_device *dev;
1375
1376 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
1377 if (dev->dev_type == EDGE_DEV ||
1378 dev->dev_type == FANOUT_DEV) {
1379 struct sas_expander_device *ex =
1380 rphy_to_expander_device(dev->rphy);
1381
1382 if (level == ex->level)
1383 res = sas_ex_discover_devices(dev, -1);
1384 else if (level > 0)
1385 res = sas_ex_discover_devices(port->port_dev, -1);
1387 }
1388 }
1389
1390 return res;
1391}
1392
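/*
 * Breadth-first sweep: discovering the current deepest level may attach
 * new child expanders and raise disc.max_level, so loop until the
 * deepest level stops moving.
 */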
1393static int sas_ex_bfs_disc(struct asd_sas_port *port)
1394{
1395 int res;
1396 int level;
1397
1398 do {
1399 level = port->disc.max_level;
1400 res = sas_ex_level_discovery(port, level);
1401 mb();
1402 } while (level < port->disc.max_level);
1403
1404 return res;
1405}
1406
1407int sas_discover_root_expander(struct domain_device *dev)
1408{
1409 int res;
1410 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1411
1412 sas_rphy_add(dev->rphy);
1413
1414 ex->level = dev->port->disc.max_level; /* 0 */
1415 res = sas_discover_expander(dev);
1416 if (!res)
1417 sas_ex_bfs_disc(dev->port);
1418
1419 return res;
1420}
1421
1422/* ---------- Domain revalidation ---------- */
1423
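/*
 * Revalidation localizes the source of a BROADCAST(CHANGE) by comparing
 * cached expander and phy change counts against freshly read ones
 * (REPORT GENERAL, DISCOVER), then rediscovers only the affected phy.
 */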
1424static int sas_get_phy_discover(struct domain_device *dev,
1425 int phy_id, struct smp_resp *disc_resp)
1426{
1427 int res;
1428 u8 *disc_req;
1429
1430 disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
1431 if (!disc_req)
1432 return -ENOMEM;
1433
1434 disc_req[1] = SMP_DISCOVER;
1435 disc_req[9] = phy_id;
1436
1437 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
1438 disc_resp, DISCOVER_RESP_SIZE);
1439 if (!res && disc_resp->result != SMP_RESP_FUNC_ACC)
1440 res = disc_resp->result;
1441
1446 kfree(disc_req);
1447 return res;
1448}
1449
1450static int sas_get_phy_change_count(struct domain_device *dev,
1451 int phy_id, int *pcc)
1452{
1453 int res;
1454 struct smp_resp *disc_resp;
1455
1456 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
1457 if (!disc_resp)
1458 return -ENOMEM;
1459
1460 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1461 if (!res)
1462 *pcc = disc_resp->disc.change_count;
1463
1464 kfree(disc_resp);
1465 return res;
1466}
1467
1468static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
1469 int phy_id, u8 *attached_sas_addr)
1470{
1471 int res;
1472 struct smp_resp *disc_resp;
1473 struct discover_resp *dr;
1474
1475 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
1476 if (!disc_resp)
1477 return -ENOMEM;
1478 dr = &disc_resp->disc;
1479
1480 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1481 if (!res) {
1482 memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8);
1483 if (dr->attached_dev_type == 0)
1484 memset(attached_sas_addr, 0, 8);
1485 }
1486 kfree(disc_resp);
1487 return res;
1488}
1489
1490static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1491 int from_phy)
1492{
1493 struct expander_device *ex = &dev->ex_dev;
1494 int res = 0;
1495 int i;
1496
1497 for (i = from_phy; i < ex->num_phys; i++) {
1498 int phy_change_count = 0;
1499
1500 res = sas_get_phy_change_count(dev, i, &phy_change_count);
1501 if (res)
1502 goto out;
1503 else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1504 ex->ex_phy[i].phy_change_count = phy_change_count;
1505 *phy_id = i;
1506 return 0;
1507 }
1508 }
1509out:
1510 return res;
1511}
1512
1513static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
1514{
1515 int res;
1516 u8 *rg_req;
1517 struct smp_resp *rg_resp;
1518
1519 rg_req = alloc_smp_req(RG_REQ_SIZE);
1520 if (!rg_req)
1521 return -ENOMEM;
1522
1523 rg_resp = alloc_smp_resp(RG_RESP_SIZE);
1524 if (!rg_resp) {
1525 kfree(rg_req);
1526 return -ENOMEM;
1527 }
1528
1529 rg_req[1] = SMP_REPORT_GENERAL;
1530
1531 res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
1532 RG_RESP_SIZE);
1533 if (res)
1534 goto out;
1535 if (rg_resp->result != SMP_RESP_FUNC_ACC) {
1536 res = rg_resp->result;
1537 goto out;
1538 }
1539
1540 *ecc = be16_to_cpu(rg_resp->rg.change_count);
1541out:
1542 kfree(rg_resp);
1543 kfree(rg_req);
1544 return res;
1545}
1546
1547static int sas_find_bcast_dev(struct domain_device *dev,
1548 struct domain_device **src_dev)
1549{
1550 struct expander_device *ex = &dev->ex_dev;
1551 int ex_change_count = -1;
1552 int res;
1553
1554 res = sas_get_ex_change_count(dev, &ex_change_count);
1555 if (res)
1556 goto out;
1557 if (ex_change_count != -1 &&
1558 ex_change_count != ex->ex_change_count) {
1559 *src_dev = dev;
1560 ex->ex_change_count = ex_change_count;
1561 } else {
1562 struct domain_device *ch;
1563
1564 list_for_each_entry(ch, &ex->children, siblings) {
1565 if (ch->dev_type == EDGE_DEV ||
1566 ch->dev_type == FANOUT_DEV) {
1567 res = sas_find_bcast_dev(ch, src_dev);
1568 if (*src_dev)
1569 return res;
1570 }
1571 }
1572 }
1573out:
1574 return res;
1575}
1576
1577static void sas_unregister_ex_tree(struct domain_device *dev)
1578{
1579 struct expander_device *ex = &dev->ex_dev;
1580 struct domain_device *child, *n;
1581
1582 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1583 if (child->dev_type == EDGE_DEV ||
1584 child->dev_type == FANOUT_DEV)
1585 sas_unregister_ex_tree(child);
1586 else
1587 sas_unregister_dev(child);
1588 }
1589 sas_unregister_dev(dev);
1590}
1591
1592static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1593 int phy_id)
1594{
1595 struct expander_device *ex_dev = &parent->ex_dev;
1596 struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
1597 struct domain_device *child, *n;
1598
1599 list_for_each_entry_safe(child, n, &ex_dev->children, siblings) {
1600 if (SAS_ADDR(child->sas_addr) ==
1601 SAS_ADDR(phy->attached_sas_addr)) {
1602 if (child->dev_type == EDGE_DEV ||
1603 child->dev_type == FANOUT_DEV)
1604 sas_unregister_ex_tree(child);
1605 else
1606 sas_unregister_dev(child);
1607 break;
1608 }
1609 }
1610 sas_disable_routing(parent, phy->attached_sas_addr);
1611 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1612 sas_port_delete_phy(phy->port, phy->phy);
1613 if (phy->port->num_phys == 0)
1614 sas_port_delete(phy->port);
1615 phy->port = NULL;
1616}
1617
1618static int sas_discover_bfs_by_root_level(struct domain_device *root,
1619 const int level)
1620{
1621 struct expander_device *ex_root = &root->ex_dev;
1622 struct domain_device *child;
1623 int res = 0;
1624
1625 list_for_each_entry(child, &ex_root->children, siblings) {
1626 if (child->dev_type == EDGE_DEV ||
1627 child->dev_type == FANOUT_DEV) {
1628 struct sas_expander_device *ex =
1629 rphy_to_expander_device(child->rphy);
1630
1631 if (level > ex->level)
1632 res = sas_discover_bfs_by_root_level(child,
1633 level);
1634 else if (level == ex->level)
1635 res = sas_ex_discover_devices(child, -1);
1636 }
1637 }
1638 return res;
1639}
1640
1641static int sas_discover_bfs_by_root(struct domain_device *dev)
1642{
1643 int res;
1644 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1645 int level = ex->level+1;
1646
1647 res = sas_ex_discover_devices(dev, -1);
1648 if (res)
1649 goto out;
1650 do {
1651 res = sas_discover_bfs_by_root_level(dev, level);
1652 mb();
1653 level += 1;
1654 } while (level <= dev->port->disc.max_level);
1655out:
1656 return res;
1657}
1658
1659static int sas_discover_new(struct domain_device *dev, int phy_id)
1660{
1661 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
1662 struct domain_device *child;
1663 int res;
1664
1665 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
1666 SAS_ADDR(dev->sas_addr), phy_id);
1667 res = sas_ex_phy_discover(dev, phy_id);
1668 if (res)
1669 goto out;
1670 res = sas_ex_discover_devices(dev, phy_id);
1671 if (res)
1672 goto out;
1673 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1674 if (SAS_ADDR(child->sas_addr) ==
1675 SAS_ADDR(ex_phy->attached_sas_addr)) {
1676 if (child->dev_type == EDGE_DEV ||
1677 child->dev_type == FANOUT_DEV)
1678 res = sas_discover_bfs_by_root(child);
1679 break;
1680 }
1681 }
1682out:
1683 return res;
1684}
1685
1686static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1687{
1688 struct expander_device *ex = &dev->ex_dev;
1689 struct ex_phy *phy = &ex->ex_phy[phy_id];
1690 u8 attached_sas_addr[8];
1691 int res;
1692
1693 res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr);
1694 switch (res) {
1695 case SMP_RESP_NO_PHY:
1696 phy->phy_state = PHY_NOT_PRESENT;
1697 sas_unregister_devs_sas_addr(dev, phy_id);
1698 goto out;
1699 case SMP_RESP_PHY_VACANT:
1700 phy->phy_state = PHY_VACANT;
1701 sas_unregister_devs_sas_addr(dev, phy_id);
1702 goto out;
1703 case SMP_RESP_FUNC_ACC:
1704 break;
1705 }
1706
1707 if (SAS_ADDR(attached_sas_addr) == 0) {
1708 phy->phy_state = PHY_EMPTY;
1709 sas_unregister_devs_sas_addr(dev, phy_id);
1710 } else if (SAS_ADDR(attached_sas_addr) ==
1711 SAS_ADDR(phy->attached_sas_addr)) {
1712 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
1713 SAS_ADDR(dev->sas_addr), phy_id);
1714 sas_ex_phy_discover(dev, phy_id);
1715 } else
1716 res = sas_discover_new(dev, phy_id);
1717out:
1718 return res;
1719}
1720
1721static int sas_rediscover(struct domain_device *dev, const int phy_id)
1722{
1723 struct expander_device *ex = &dev->ex_dev;
1724 struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
1725 int res = 0;
1726 int i;
1727
1728 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
1729 SAS_ADDR(dev->sas_addr), phy_id);
1730
1731 if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
1732 for (i = 0; i < ex->num_phys; i++) {
1733 struct ex_phy *phy = &ex->ex_phy[i];
1734
1735 if (i == phy_id)
1736 continue;
1737 if (SAS_ADDR(phy->attached_sas_addr) ==
1738 SAS_ADDR(changed_phy->attached_sas_addr)) {
1739 SAS_DPRINTK("phy%d part of wide port with "
1740 "phy%d\n", phy_id, i);
1741 goto out;
1742 }
1743 }
1744 res = sas_rediscover_dev(dev, phy_id);
1745 } else
1746 res = sas_discover_new(dev, phy_id);
1747out:
1748 return res;
1749}
1750
1751/**
1752 * sas_ex_revalidate_domain -- revalidate the domain
1753 * @port_dev: port device of the domain of interest
1754 *
1755 * NOTE: this process _must_ quit (return) as soon as any connection
1756 * errors are encountered. Connection recovery is done elsewhere.
1757 * The discover process only interrogates devices in order to
1758 * discover the domain.
1759 */
1760int sas_ex_revalidate_domain(struct domain_device *port_dev)
1761{
1762 int res;
1763 struct domain_device *dev = NULL;
1764
1765 res = sas_find_bcast_dev(port_dev, &dev);
1766 if (res)
1767 goto out;
1768 if (dev) {
1769 struct expander_device *ex = &dev->ex_dev;
1770 int i = 0, phy_id;
1771
1772 do {
1773 phy_id = -1;
1774 res = sas_find_bcast_phy(dev, &phy_id, i);
1775 if (phy_id == -1)
1776 break;
1777 res = sas_rediscover(dev, phy_id);
1778 i = phy_id + 1;
1779 } while (i < ex->num_phys);
1780 }
1781out:
1782 return res;
1783}
1784
1785#if 0
1786/* ---------- SMP portal ---------- */
1787
1788static ssize_t smp_portal_write(struct kobject *kobj, char *buf, loff_t offs,
1789 size_t size)
1790{
1791 struct domain_device *dev = to_dom_device(kobj);
1792 struct expander_device *ex = &dev->ex_dev;
1793
1794 if (offs != 0)
1795 return -EFBIG;
1796 else if (size == 0)
1797 return 0;
1798
1799 if (down_interruptible(&ex->smp_sema))
 return -ERESTARTSYS;
1800 kfree(ex->smp_req);
1802 ex->smp_req = kzalloc(size, GFP_USER);
1803 if (!ex->smp_req) {
1804 up(&ex->smp_sema);
1805 return -ENOMEM;
1806 }
1807 memcpy(ex->smp_req, buf, size);
1808 ex->smp_req_size = size;
1809 ex->smp_portal_pid = current->pid;
1810 up(&ex->smp_sema);
1811
1812 return size;
1813}
1814
1815static ssize_t smp_portal_read(struct kobject *kobj, char *buf, loff_t offs,
1816 size_t size)
1817{
1818 struct domain_device *dev = to_dom_device(kobj);
1819 struct expander_device *ex = &dev->ex_dev;
1820 u8 *smp_resp;
1821 int res = -EINVAL;
1822
1823 /* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact
1824 * it should be 0.
1825 */
1826
1827 if (down_interruptible(&ex->smp_sema))
 return -ERESTARTSYS;
1828 if (!ex->smp_req || ex->smp_portal_pid != current->pid)
1829 goto out;
1830
1831 res = 0;
1832 if (size == 0)
1833 goto out;
1834
1835 res = -ENOMEM;
1836 smp_resp = alloc_smp_resp(size);
1837 if (!smp_resp)
1838 goto out;
1839 res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size,
1840 smp_resp, size);
1841 if (!res) {
1842 memcpy(buf, smp_resp, size);
1843 res = size;
1844 }
1845
1846 kfree(smp_resp);
1847out:
1848 kfree(ex->smp_req);
1849 ex->smp_req = NULL;
1850 ex->smp_req_size = 0;
1851 ex->smp_portal_pid = -1;
1852 up(&ex->smp_sema);
1853 return res;
1854}
1855#endif
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
new file mode 100644
index 000000000000..c836a237fb79
--- /dev/null
+++ b/drivers/scsi/libsas/sas_init.c
@@ -0,0 +1,267 @@
1/*
2 * Serial Attached SCSI (SAS) Transport Layer initialization
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/device.h>
29#include <linux/spinlock.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34
35#include "sas_internal.h"
36
37#include "../scsi_sas_internal.h"
38
39kmem_cache_t *sas_task_cache;
40
41/*------------ SAS addr hash -----------*/
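/*
 * Hash a world-wide SAS address (8 bytes, fed MSB-first) down to its
 * 24-bit hashed form using the generator polynomial 0x00DB2777; the
 * result is returned big-endian in hashed[0..2].
 */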
42void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
43{
44 const u32 poly = 0x00DB2777;
45 u32 r = 0;
46 int i;
47
48 for (i = 0; i < 8; i++) {
49 int b;
50 for (b = 7; b >= 0; b--) {
51 r <<= 1;
52 if ((1 << b) & sas_addr[i]) {
53 if (!(r & 0x01000000))
54 r ^= poly;
55 } else if (r & 0x01000000)
56 r ^= poly;
57 }
58 }
59
60 hashed[0] = (r >> 16) & 0xFF;
61 hashed[1] = (r >> 8) & 0xFF;
62 hashed[2] = r & 0xFF;
63}
64
65
66/* ---------- HA events ---------- */
67
68void sas_hae_reset(void *data)
69{
70 struct sas_ha_struct *ha = data;
71
72 sas_begin_event(HAE_RESET, &ha->event_lock,
73 &ha->pending);
74}
75
76int sas_register_ha(struct sas_ha_struct *sas_ha)
77{
78 int error = 0;
79
80 spin_lock_init(&sas_ha->phy_port_lock);
81 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
82
83 if (sas_ha->lldd_queue_size == 0)
84 sas_ha->lldd_queue_size = 1;
85 else if (sas_ha->lldd_queue_size == -1)
86 sas_ha->lldd_queue_size = 128; /* Sanity */
87
88 error = sas_register_phys(sas_ha);
89 if (error) {
90 printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
91 return error;
92 }
93
94 error = sas_register_ports(sas_ha);
95 if (error) {
96 printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
97 goto Undo_phys;
98 }
99
100 error = sas_init_events(sas_ha);
101 if (error) {
102 printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
103 goto Undo_ports;
104 }
105
106 if (sas_ha->lldd_max_execute_num > 1) {
107 error = sas_init_queue(sas_ha);
108 if (error) {
109 printk(KERN_NOTICE "couldn't start queue thread:%d, "
110 "running in direct mode\n", error);
111 sas_ha->lldd_max_execute_num = 1;
112 }
113 }
114
115 return 0;
116
117Undo_ports:
118 sas_unregister_ports(sas_ha);
119Undo_phys:
120 sas_unregister_phys(sas_ha);
121 return error;
122}
123
124int sas_unregister_ha(struct sas_ha_struct *sas_ha)
125{
126 if (sas_ha->lldd_max_execute_num > 1)
127 sas_shutdown_queue(sas_ha);
129
130 sas_unregister_ports(sas_ha);
131
132 return 0;
133}
134
135static int sas_get_linkerrors(struct sas_phy *phy)
136{
137 if (scsi_is_sas_phy_local(phy))
138 /* FIXME: we have no local phy stats
139 * gathering at this time */
140 return -EINVAL;
141
142 return sas_smp_get_phy_events(phy);
143}
144
145static int sas_phy_reset(struct sas_phy *phy, int hard_reset)
146{
147 int ret;
148 enum phy_func reset_type;
149
150 if (hard_reset)
151 reset_type = PHY_FUNC_HARD_RESET;
152 else
153 reset_type = PHY_FUNC_LINK_RESET;
154
155 if (scsi_is_sas_phy_local(phy)) {
156 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
157 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
158 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
159 struct sas_internal *i =
160 to_sas_internal(sas_ha->core.shost->transportt);
161
162 ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
163 } else {
164 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
165 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
166 ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
167 }
168 return ret;
169}
170
171static int sas_set_phy_speed(struct sas_phy *phy,
172 struct sas_phy_linkrates *rates)
173{
174 int ret;
175
176 if ((rates->minimum_linkrate &&
177 rates->minimum_linkrate > phy->maximum_linkrate) ||
178 (rates->maximum_linkrate &&
179 rates->maximum_linkrate < phy->minimum_linkrate))
180 return -EINVAL;
181
182 if (rates->minimum_linkrate &&
183 rates->minimum_linkrate < phy->minimum_linkrate_hw)
184 rates->minimum_linkrate = phy->minimum_linkrate_hw;
185
186 if (rates->maximum_linkrate &&
187 rates->maximum_linkrate > phy->maximum_linkrate_hw)
188 rates->maximum_linkrate = phy->maximum_linkrate_hw;
189
190 if (scsi_is_sas_phy_local(phy)) {
191 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
192 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
193 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
194 struct sas_internal *i =
195 to_sas_internal(sas_ha->core.shost->transportt);
196
197 ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
198 rates);
199 } else {
200 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
201 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
202 ret = sas_smp_phy_control(ddev, phy->number,
203 PHY_FUNC_LINK_RESET, rates);
205 }
206
207 return ret;
208}
209
210static struct sas_function_template sft = {
211 .phy_reset = sas_phy_reset,
212 .set_phy_speed = sas_set_phy_speed,
213 .get_linkerrors = sas_get_linkerrors,
214};
215
216struct scsi_transport_template *
217sas_domain_attach_transport(struct sas_domain_function_template *dft)
218{
219 struct scsi_transport_template *stt = sas_attach_transport(&sft);
220 struct sas_internal *i;
221
222 if (!stt)
223 return stt;
224
225 i = to_sas_internal(stt);
226 i->dft = dft;
227 stt->create_work_queue = 1;
228 stt->eh_timed_out = sas_scsi_timed_out;
229 stt->eh_strategy_handler = sas_scsi_recover_host;
230
231 return stt;
232}
233EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
234
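/*
 * Typical LLDD wiring (illustrative sketch only -- "my_dft", "my_ha",
 * "my_execute_task" and "my_control_phy" are hypothetical driver
 * objects, not part of this file):
 *
 *	static struct sas_domain_function_template my_dft = {
 *		.lldd_execute_task = my_execute_task,
 *		.lldd_control_phy  = my_control_phy,
 *	};
 *
 *	shost->transportt = sas_domain_attach_transport(&my_dft);
 *	...fill in my_ha->sas_addr, my_ha->sas_phy[], queue sizes...
 *	err = sas_register_ha(my_ha);
 */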
235
236void sas_domain_release_transport(struct scsi_transport_template *stt)
237{
238 sas_release_transport(stt);
239}
240EXPORT_SYMBOL_GPL(sas_domain_release_transport);
241
242/* ---------- SAS Class register/unregister ---------- */
243
244static int __init sas_class_init(void)
245{
246 sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
247 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
248 if (!sas_task_cache)
249 return -ENOMEM;
250
251 return 0;
252}
253
254static void __exit sas_class_exit(void)
255{
256 kmem_cache_destroy(sas_task_cache);
257}
258
259MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
260MODULE_DESCRIPTION("SAS Transport Layer");
261MODULE_LICENSE("GPL v2");
262
263module_init(sas_class_init);
264module_exit(sas_class_exit);
265
266EXPORT_SYMBOL_GPL(sas_register_ha);
267EXPORT_SYMBOL_GPL(sas_unregister_ha);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
new file mode 100644
index 000000000000..bffcee474921
--- /dev/null
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -0,0 +1,146 @@
1/*
2 * Serial Attached SCSI (SAS) class internal header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#ifndef _SAS_INTERNAL_H_
27#define _SAS_INTERNAL_H_
28
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_sas.h>
32#include <scsi/libsas.h>
33
34#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
35
36#ifdef SAS_DEBUG
37#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
38#else
39#define SAS_DPRINTK(fmt, ...)
40#endif
41
42void sas_scsi_recover_host(struct Scsi_Host *shost);
43
44int sas_show_class(enum sas_class class, char *buf);
45int sas_show_proto(enum sas_proto proto, char *buf);
46int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
47int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
48
49int sas_register_phys(struct sas_ha_struct *sas_ha);
50void sas_unregister_phys(struct sas_ha_struct *sas_ha);
51
52int sas_register_ports(struct sas_ha_struct *sas_ha);
53void sas_unregister_ports(struct sas_ha_struct *sas_ha);
54
55enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
56
57int sas_init_queue(struct sas_ha_struct *sas_ha);
58int sas_init_events(struct sas_ha_struct *sas_ha);
59void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
60
61void sas_deform_port(struct asd_sas_phy *phy);
62
63void sas_porte_bytes_dmaed(void *);
64void sas_porte_broadcast_rcvd(void *);
65void sas_porte_link_reset_err(void *);
66void sas_porte_timer_event(void *);
67void sas_porte_hard_reset(void *);
68
69int sas_notify_lldd_dev_found(struct domain_device *);
70void sas_notify_lldd_dev_gone(struct domain_device *);
71
72int sas_smp_phy_control(struct domain_device *dev, int phy_id,
73 enum phy_func phy_func, struct sas_phy_linkrates *);
74int sas_smp_get_phy_events(struct sas_phy *phy);
75
76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
77
78void sas_hae_reset(void *);
79
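/*
 * Queue an event at most once: the pending bit is set here and cleared
 * by sas_begin_event() when the handler starts running, so duplicate
 * notifications arriving in between are coalesced.
 */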
80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending,
82 struct work_struct *work,
83 struct Scsi_Host *shost)
84{
85 unsigned long flags;
86
87 spin_lock_irqsave(lock, flags);
88 if (test_bit(event, pending)) {
89 spin_unlock_irqrestore(lock, flags);
90 return;
91 }
92 __set_bit(event, pending);
93 spin_unlock_irqrestore(lock, flags);
94 scsi_queue_work(shost, work);
95}
96
97static inline void sas_begin_event(int event, spinlock_t *lock,
98 unsigned long *pending)
99{
100 unsigned long flags;
101
102 spin_lock_irqsave(lock, flags);
103 __clear_bit(event, pending);
104 spin_unlock_irqrestore(lock, flags);
105}
106
107static inline void sas_fill_in_rphy(struct domain_device *dev,
108 struct sas_rphy *rphy)
109{
110 rphy->identify.sas_address = SAS_ADDR(dev->sas_addr);
111 rphy->identify.initiator_port_protocols = dev->iproto;
112 rphy->identify.target_port_protocols = dev->tproto;
113 switch (dev->dev_type) {
114 case SATA_DEV:
115 /* FIXME: need sata device type */
116 case SAS_END_DEV:
117 rphy->identify.device_type = SAS_END_DEVICE;
118 break;
119 case EDGE_DEV:
120 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
121 break;
122 case FANOUT_DEV:
123 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
124 break;
125 default:
126 rphy->identify.device_type = SAS_PHY_UNUSED;
127 break;
128 }
129}
130
131static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
132{
133 struct expander_device *ex = &dev->ex_dev;
134 struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
135
136 if (!ex->parent_port) {
137 ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id);
138 /* FIXME: error handling */
139 BUG_ON(!ex->parent_port);
140 BUG_ON(sas_port_add(ex->parent_port));
141 sas_port_mark_backlink(ex->parent_port);
142 }
143 sas_port_add_phy(ex->parent_port, ex_phy->phy);
144}
145
146#endif /* _SAS_INTERNAL_H_ */
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
new file mode 100644
index 000000000000..9340cdbae4a3
--- /dev/null
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -0,0 +1,158 @@
1/*
2 * Serial Attached SCSI (SAS) Phy class
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h"
30
31/* ---------- Phy events ---------- */
32
33static void sas_phye_loss_of_signal(void *data)
34{
35 struct asd_sas_phy *phy = data;
36
37 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
38 &phy->phy_events_pending);
39 phy->error = 0;
40 sas_deform_port(phy);
41}
42
43static void sas_phye_oob_done(void *data)
44{
45 struct asd_sas_phy *phy = data;
46
47 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
48 &phy->phy_events_pending);
49 phy->error = 0;
50}
51
52static void sas_phye_oob_error(void *data)
53{
54 struct asd_sas_phy *phy = data;
55 struct sas_ha_struct *sas_ha = phy->ha;
56 struct asd_sas_port *port = phy->port;
57 struct sas_internal *i =
58 to_sas_internal(sas_ha->core.shost->transportt);
59
60 sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
61 &phy->phy_events_pending);
62
63 sas_deform_port(phy);
64
65 if (!port && phy->enabled && i->dft->lldd_control_phy) {
66 phy->error++;
67 switch (phy->error) {
68 case 1:
69 case 2:
70 i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET,
71 NULL);
72 break;
73 case 3:
74 default:
75 phy->error = 0;
76 phy->enabled = 0;
77 i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
78 break;
79 }
80 }
81}
82
83static void sas_phye_spinup_hold(void *data)
84{
85 struct asd_sas_phy *phy = data;
86 struct sas_ha_struct *sas_ha = phy->ha;
87 struct sas_internal *i =
88 to_sas_internal(sas_ha->core.shost->transportt);
89
90 sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
91 &phy->phy_events_pending);
92
93 phy->error = 0;
94 i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
95}
96
97/* ---------- Phy class registration ---------- */
98
99int sas_register_phys(struct sas_ha_struct *sas_ha)
100{
101 int i;
102
103 static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
104 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
105 [PHYE_OOB_DONE] = sas_phye_oob_done,
106 [PHYE_OOB_ERROR] = sas_phye_oob_error,
107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
108 };
109
110 static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
111 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
112 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
113 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
114 [PORTE_TIMER_EVENT] = sas_porte_timer_event,
115 [PORTE_HARD_RESET] = sas_porte_hard_reset,
116 };
117
118 /* Now register the phys. */
119 for (i = 0; i < sas_ha->num_phys; i++) {
120 int k;
121 struct asd_sas_phy *phy = sas_ha->sas_phy[i];
122
123 phy->error = 0;
124 INIT_LIST_HEAD(&phy->port_phy_el);
125 for (k = 0; k < PORT_NUM_EVENTS; k++)
126 INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
127 phy);
128
129 for (k = 0; k < PHY_NUM_EVENTS; k++)
130 INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
131 phy);
132 phy->port = NULL;
133 phy->ha = sas_ha;
134 spin_lock_init(&phy->frame_rcvd_lock);
135 spin_lock_init(&phy->sas_prim_lock);
136 phy->frame_rcvd_size = 0;
137
138 phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev,
139 i);
140 if (!phy->phy)
141 return -ENOMEM;
142
143 phy->phy->identify.initiator_port_protocols =
144 phy->iproto;
145 phy->phy->identify.target_port_protocols = phy->tproto;
146 phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr);
147 phy->phy->identify.phy_identifier = i;
148 phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
149 phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
150 phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
151 phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
152 phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
153
154 sas_phy_add(phy->phy);
155 }
156
157 return 0;
158}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
new file mode 100644
index 000000000000..253cdcf306a2
--- /dev/null
+++ b/drivers/scsi/libsas/sas_port.c
@@ -0,0 +1,279 @@
1/*
2 * Serial Attached SCSI (SAS) Port class
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h"
30
31/**
32 * sas_form_port -- add this phy to a port
33 * @phy: the phy of interest
34 *
35 * This function adds this phy to an existing port, thus creating a wide
36 * port, or it creates a port and adds the phy to the port.
37 */
38static void sas_form_port(struct asd_sas_phy *phy)
39{
40 int i;
41 struct sas_ha_struct *sas_ha = phy->ha;
42 struct asd_sas_port *port = phy->port;
43 struct sas_internal *si =
44 to_sas_internal(sas_ha->core.shost->transportt);
45
46 if (port) {
47 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
48 SAS_ADDR_SIZE) == 0)
49 sas_deform_port(phy);
50 else {
 51 SAS_DPRINTK("%s: phy%d belongs to port%d already (%d)!\n",
52 __FUNCTION__, phy->id, phy->port->id,
53 phy->port->num_phys);
54 return;
55 }
56 }
57
58 /* find a port */
59 spin_lock(&sas_ha->phy_port_lock);
60 for (i = 0; i < sas_ha->num_phys; i++) {
61 port = sas_ha->sas_port[i];
62 spin_lock(&port->phy_list_lock);
63 if (*(u64 *) port->sas_addr &&
64 memcmp(port->attached_sas_addr,
65 phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
66 port->num_phys > 0) {
67 /* wide port */
68 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
69 port->id);
70 break;
 71 } else if (*(u64 *) port->sas_addr == 0 && port->num_phys == 0) {
72 memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
73 break;
74 }
75 spin_unlock(&port->phy_list_lock);
76 }
77
78 if (i >= sas_ha->num_phys) {
79 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
80 __FUNCTION__);
81 spin_unlock(&sas_ha->phy_port_lock);
82 return;
83 }
84
85 /* add the phy to the port */
86 list_add_tail(&phy->port_phy_el, &port->phy_list);
87 phy->port = port;
88 port->num_phys++;
89 port->phy_mask |= (1U << phy->id);
90
91 if (!port->phy)
92 port->phy = phy->phy;
93
94 SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id,
95 port->id, port->phy_mask);
96
97 if (*(u64 *)port->attached_sas_addr == 0) {
98 port->class = phy->class;
99 memcpy(port->attached_sas_addr, phy->attached_sas_addr,
100 SAS_ADDR_SIZE);
101 port->iproto = phy->iproto;
102 port->tproto = phy->tproto;
103 port->oob_mode = phy->oob_mode;
104 port->linkrate = phy->linkrate;
105 } else
106 port->linkrate = max(port->linkrate, phy->linkrate);
107 spin_unlock(&port->phy_list_lock);
108 spin_unlock(&sas_ha->phy_port_lock);
109
110 if (!port->port) {
111 port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
112 BUG_ON(!port->port);
113 sas_port_add(port->port);
114 }
115 sas_port_add_phy(port->port, phy->phy);
116
117 if (port->port_dev)
118 port->port_dev->pathways = port->num_phys;
119
120 /* Tell the LLDD about this port formation. */
121 if (si->dft->lldd_port_formed)
122 si->dft->lldd_port_formed(phy);
123
124 sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
125}
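/*
 * Illustrative sketch (editorial example; my_lldd_* is hypothetical):
 * port formation is driven entirely by PORTE_BYTES_DMAED.  Once an LLDD
 * has received the identify frame and filled in phy->attached_sas_addr,
 * it posts the event through the notify_port_event() hook installed by
 * sas_init_events(), and sas_form_port() above does the rest.
 */
#if 0	/* example only */
static void my_lldd_identify_frame_rcvd(struct asd_sas_phy *phy)
{
	/* assumes the interrupt handler already copied the identify
	 * frame into phy->frame_rcvd and set phy->attached_sas_addr */
	phy->ha->notify_port_event(phy, PORTE_BYTES_DMAED);
}
#endif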
126
127/**
128 * sas_deform_port -- remove this phy from the port it belongs to
129 * @phy: the phy of interest
130 *
131 * This is called when the physical link to the other phy has been
132 * lost (on this phy), in Event thread context. We cannot delay here.
133 */
134void sas_deform_port(struct asd_sas_phy *phy)
135{
136 struct sas_ha_struct *sas_ha = phy->ha;
137 struct asd_sas_port *port = phy->port;
138 struct sas_internal *si =
139 to_sas_internal(sas_ha->core.shost->transportt);
140
141 if (!port)
142 return; /* done by a phy event */
143
144 if (port->port_dev)
145 port->port_dev->pathways--;
146
147 if (port->num_phys == 1) {
148 sas_unregister_domain_devices(port);
149 sas_port_delete(port->port);
150 port->port = NULL;
151 } else
152 sas_port_delete_phy(port->port, phy->phy);
153
154
155 if (si->dft->lldd_port_deformed)
156 si->dft->lldd_port_deformed(phy);
157
158 spin_lock(&sas_ha->phy_port_lock);
159 spin_lock(&port->phy_list_lock);
160
161 list_del_init(&phy->port_phy_el);
162 phy->port = NULL;
163 port->num_phys--;
164 port->phy_mask &= ~(1U << phy->id);
165
166 if (port->num_phys == 0) {
167 INIT_LIST_HEAD(&port->phy_list);
168 memset(port->sas_addr, 0, SAS_ADDR_SIZE);
169 memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
170 port->class = 0;
171 port->iproto = 0;
172 port->tproto = 0;
173 port->oob_mode = 0;
174 port->phy_mask = 0;
175 }
176 spin_unlock(&port->phy_list_lock);
177 spin_unlock(&sas_ha->phy_port_lock);
178
179 return;
180}
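/*
 * Editorial note: only the last phy leaving a port tears the transport
 * port down (sas_port_delete() above); while num_phys is still greater
 * than one, the wide port merely shrinks and just the one transport phy
 * is dropped with sas_port_delete_phy().
 */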
181
182/* ---------- SAS port events ---------- */
183
184void sas_porte_bytes_dmaed(void *data)
185{
186 struct asd_sas_phy *phy = data;
187
188 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
189 &phy->port_events_pending);
190
191 sas_form_port(phy);
192}
193
194void sas_porte_broadcast_rcvd(void *data)
195{
196 unsigned long flags;
197 u32 prim;
198 struct asd_sas_phy *phy = data;
199
200 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
201 &phy->port_events_pending);
202
203 spin_lock_irqsave(&phy->sas_prim_lock, flags);
204 prim = phy->sas_prim;
205 spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
206
207 SAS_DPRINTK("broadcast received: %d\n", prim);
208 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
209}
210
211void sas_porte_link_reset_err(void *data)
212{
213 struct asd_sas_phy *phy = data;
214
215 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
216 &phy->port_events_pending);
217
218 sas_deform_port(phy);
219}
220
221void sas_porte_timer_event(void *data)
222{
223 struct asd_sas_phy *phy = data;
224
225 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
226 &phy->port_events_pending);
227
228 sas_deform_port(phy);
229}
230
231void sas_porte_hard_reset(void *data)
232{
233 struct asd_sas_phy *phy = data;
234
235 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
236 &phy->port_events_pending);
237
238 sas_deform_port(phy);
239}
240
241/* ---------- SAS port registration ---------- */
242
243static void sas_init_port(struct asd_sas_port *port,
244 struct sas_ha_struct *sas_ha, int i)
245{
246 port->id = i;
247 INIT_LIST_HEAD(&port->dev_list);
248 spin_lock_init(&port->phy_list_lock);
249 INIT_LIST_HEAD(&port->phy_list);
250 port->num_phys = 0;
251 port->phy_mask = 0;
252 port->ha = sas_ha;
253
254 spin_lock_init(&port->dev_list_lock);
255}
256
257int sas_register_ports(struct sas_ha_struct *sas_ha)
258{
259 int i;
260
261 /* initialize the ports and discovery */
262 for (i = 0; i < sas_ha->num_phys; i++) {
263 struct asd_sas_port *port = sas_ha->sas_port[i];
264
265 sas_init_port(port, sas_ha, i);
266 sas_init_disc(&port->disc, port);
267 }
268 return 0;
269}
270
271void sas_unregister_ports(struct sas_ha_struct *sas_ha)
272{
273 int i;
274
275 for (i = 0; i < sas_ha->num_phys; i++)
276 if (sas_ha->sas_phy[i]->port)
277 sas_deform_port(sas_ha->sas_phy[i]);
278
279}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
new file mode 100644
index 000000000000..7f9e89bcac7e
--- /dev/null
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -0,0 +1,786 @@
1/*
2 * Serial Attached SCSI (SAS) class SCSI Host glue.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include "sas_internal.h"
27
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34#include "../scsi_sas_internal.h"
35
36#include <linux/err.h>
37#include <linux/blkdev.h>
38#include <linux/scatterlist.h>
39
40/* ---------- SCSI Host glue ---------- */
41
42#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
43#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
44
45static void sas_scsi_task_done(struct sas_task *task)
46{
47 struct task_status_struct *ts = &task->task_status;
48 struct scsi_cmnd *sc = task->uldd_task;
49 unsigned ts_flags = task->task_state_flags;
50 int hs = 0, stat = 0;
51
52 if (unlikely(!sc)) {
 53 SAS_DPRINTK("task_done called with a nonexistent SCSI cmnd!\n");
54 list_del_init(&task->list);
55 sas_free_task(task);
56 return;
57 }
58
59 if (ts->resp == SAS_TASK_UNDELIVERED) {
60 /* transport error */
61 hs = DID_NO_CONNECT;
62 } else { /* ts->resp == SAS_TASK_COMPLETE */
63 /* task delivered, what happened afterwards? */
64 switch (ts->stat) {
65 case SAS_DEV_NO_RESPONSE:
66 case SAS_INTERRUPTED:
67 case SAS_PHY_DOWN:
68 case SAS_NAK_R_ERR:
69 case SAS_OPEN_TO:
70 hs = DID_NO_CONNECT;
71 break;
72 case SAS_DATA_UNDERRUN:
73 sc->resid = ts->residual;
74 if (sc->request_bufflen - sc->resid < sc->underflow)
75 hs = DID_ERROR;
76 break;
77 case SAS_DATA_OVERRUN:
78 hs = DID_ERROR;
79 break;
80 case SAS_QUEUE_FULL:
81 hs = DID_SOFT_ERROR; /* retry */
82 break;
83 case SAS_DEVICE_UNKNOWN:
84 hs = DID_BAD_TARGET;
85 break;
86 case SAS_SG_ERR:
87 hs = DID_PARITY;
88 break;
89 case SAS_OPEN_REJECT:
90 if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
91 hs = DID_SOFT_ERROR; /* retry */
92 else
93 hs = DID_ERROR;
94 break;
95 case SAS_PROTO_RESPONSE:
96 SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
97 "task; please report this\n",
98 task->dev->port->ha->sas_ha_name);
99 break;
100 case SAS_ABORTED_TASK:
101 hs = DID_ABORT;
102 break;
103 case SAM_CHECK_COND:
 104 memcpy(sc->sense_buffer, ts->buf,
 105 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); /* min, not max: never overrun sense_buffer */
106 stat = SAM_CHECK_COND;
107 break;
108 default:
109 stat = ts->stat;
110 break;
111 }
112 }
113 ASSIGN_SAS_TASK(sc, NULL);
114 sc->result = (hs << 16) | stat;
115 list_del_init(&task->list);
116 sas_free_task(task);
117 /* This is very ugly but this is how SCSI Core works. */
118 if (ts_flags & SAS_TASK_STATE_ABORTED)
119 scsi_finish_command(sc);
120 else
121 sc->scsi_done(sc);
122}
123
124static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
125{
126 enum task_attribute ta = TASK_ATTR_SIMPLE;
127 if (cmd->request && blk_rq_tagged(cmd->request)) {
128 if (cmd->device->ordered_tags &&
129 (cmd->request->flags & REQ_HARDBARRIER))
130 ta = TASK_ATTR_HOQ;
131 }
132 return ta;
133}
134
135static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
136 struct domain_device *dev,
137 gfp_t gfp_flags)
138{
139 struct sas_task *task = sas_alloc_task(gfp_flags);
140 struct scsi_lun lun;
141
142 if (!task)
143 return NULL;
144
145 *(u32 *)cmd->sense_buffer = 0;
146 task->uldd_task = cmd;
147 ASSIGN_SAS_TASK(cmd, task);
148
149 task->dev = dev;
150 task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
151
152 task->ssp_task.retry_count = 1;
153 int_to_scsilun(cmd->device->lun, &lun);
154 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
155 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
156 memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
157
158 task->scatter = cmd->request_buffer;
159 task->num_scatter = cmd->use_sg;
160 task->total_xfer_len = cmd->request_bufflen;
161 task->data_dir = cmd->sc_data_direction;
162
163 task->task_done = sas_scsi_task_done;
164
165 return task;
166}
167
168static int sas_queue_up(struct sas_task *task)
169{
170 struct sas_ha_struct *sas_ha = task->dev->port->ha;
171 struct scsi_core *core = &sas_ha->core;
172 unsigned long flags;
173 LIST_HEAD(list);
174
175 spin_lock_irqsave(&core->task_queue_lock, flags);
176 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
177 spin_unlock_irqrestore(&core->task_queue_lock, flags);
178 return -SAS_QUEUE_FULL;
179 }
180 list_add_tail(&task->list, &core->task_queue);
181 core->task_queue_size += 1;
182 spin_unlock_irqrestore(&core->task_queue_lock, flags);
183 up(&core->queue_thread_sema);
184
185 return 0;
186}
187
188/**
189 * sas_queuecommand -- Enqueue a command for processing
190 * @parameters: See SCSI Core documentation
191 *
192 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
193 * call us without holding an IRQ spinlock...
194 */
195int sas_queuecommand(struct scsi_cmnd *cmd,
196 void (*scsi_done)(struct scsi_cmnd *))
197{
198 int res = 0;
199 struct domain_device *dev = cmd_to_domain_dev(cmd);
200 struct Scsi_Host *host = cmd->device->host;
201 struct sas_internal *i = to_sas_internal(host->transportt);
202
203 spin_unlock_irq(host->host_lock);
204
205 {
206 struct sas_ha_struct *sas_ha = dev->port->ha;
207 struct sas_task *task;
208
209 res = -ENOMEM;
210 task = sas_create_task(cmd, dev, GFP_ATOMIC);
211 if (!task)
212 goto out;
213
214 cmd->scsi_done = scsi_done;
215 /* Queue up, Direct Mode or Task Collector Mode. */
216 if (sas_ha->lldd_max_execute_num < 2)
217 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
218 else
219 res = sas_queue_up(task);
220
221 /* Examine */
222 if (res) {
223 SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
224 ASSIGN_SAS_TASK(cmd, NULL);
225 sas_free_task(task);
226 if (res == -SAS_QUEUE_FULL) {
227 cmd->result = DID_SOFT_ERROR << 16; /* retry */
228 res = 0;
229 scsi_done(cmd);
230 }
231 goto out;
232 }
233 }
234out:
235 spin_lock_irq(host->host_lock);
236 return res;
237}
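/*
 * Illustrative note (editorial; the numbers below are hypothetical): the
 * branch above is the entire mode switch.  An LLDD that leaves
 * lldd_max_execute_num at 0 or 1 runs in Direct Mode and receives one
 * task per lldd_execute_task() call; any larger value opts into Task
 * Collector Mode, where tasks are batched by the queue thread below.
 */
#if 0	/* example only */
static void my_lldd_configure_queueing(struct sas_ha_struct *sas_ha)
{
	sas_ha->lldd_max_execute_num = 16;	/* Task Collector Mode */
	sas_ha->lldd_queue_size = 64;		/* sized to the HW queue */
}
#endif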
238
239static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
240{
241 struct scsi_cmnd *cmd, *n;
242
243 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
244 if (cmd == my_cmd)
245 list_del_init(&cmd->eh_entry);
246 }
247}
248
249static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
250 struct domain_device *dev)
251{
252 struct scsi_cmnd *cmd, *n;
253
254 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
255 struct domain_device *x = cmd_to_domain_dev(cmd);
256
257 if (x == dev)
258 list_del_init(&cmd->eh_entry);
259 }
260}
261
262static void sas_scsi_clear_queue_port(struct list_head *error_q,
263 struct asd_sas_port *port)
264{
265 struct scsi_cmnd *cmd, *n;
266
267 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
268 struct domain_device *dev = cmd_to_domain_dev(cmd);
269 struct asd_sas_port *x = dev->port;
270
271 if (x == port)
272 list_del_init(&cmd->eh_entry);
273 }
274}
275
276enum task_disposition {
277 TASK_IS_DONE,
278 TASK_IS_ABORTED,
279 TASK_IS_AT_LU,
280 TASK_IS_NOT_AT_LU,
281};
282
283static enum task_disposition sas_scsi_find_task(struct sas_task *task)
284{
285 struct sas_ha_struct *ha = task->dev->port->ha;
286 unsigned long flags;
287 int i, res;
288 struct sas_internal *si =
289 to_sas_internal(task->dev->port->ha->core.shost->transportt);
290
291 if (ha->lldd_max_execute_num > 1) {
292 struct scsi_core *core = &ha->core;
293 struct sas_task *t, *n;
294
295 spin_lock_irqsave(&core->task_queue_lock, flags);
296 list_for_each_entry_safe(t, n, &core->task_queue, list) {
297 if (task == t) {
298 list_del_init(&t->list);
299 spin_unlock_irqrestore(&core->task_queue_lock,
300 flags);
301 SAS_DPRINTK("%s: task 0x%p aborted from "
302 "task_queue\n",
303 __FUNCTION__, task);
304 return TASK_IS_ABORTED;
305 }
306 }
307 spin_unlock_irqrestore(&core->task_queue_lock, flags);
308 }
309
310 for (i = 0; i < 5; i++) {
311 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
312 res = si->dft->lldd_abort_task(task);
313
314 spin_lock_irqsave(&task->task_state_lock, flags);
315 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
316 spin_unlock_irqrestore(&task->task_state_lock, flags);
317 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
318 task);
319 return TASK_IS_DONE;
320 }
321 spin_unlock_irqrestore(&task->task_state_lock, flags);
322
323 if (res == TMF_RESP_FUNC_COMPLETE) {
324 SAS_DPRINTK("%s: task 0x%p is aborted\n",
325 __FUNCTION__, task);
326 return TASK_IS_ABORTED;
327 } else if (si->dft->lldd_query_task) {
328 SAS_DPRINTK("%s: querying task 0x%p\n",
329 __FUNCTION__, task);
330 res = si->dft->lldd_query_task(task);
331 if (res == TMF_RESP_FUNC_SUCC) {
332 SAS_DPRINTK("%s: task 0x%p at LU\n",
333 __FUNCTION__, task);
334 return TASK_IS_AT_LU;
335 } else if (res == TMF_RESP_FUNC_COMPLETE) {
336 SAS_DPRINTK("%s: task 0x%p not at LU\n",
337 __FUNCTION__, task);
338 return TASK_IS_NOT_AT_LU;
339 }
340 }
341 }
 342 return TASK_IS_NOT_AT_LU; /* abort and query kept failing: escalate */
343}
344
345static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
346{
347 int res = TMF_RESP_FUNC_FAILED;
348 struct scsi_lun lun;
349 struct sas_internal *i =
350 to_sas_internal(dev->port->ha->core.shost->transportt);
351
352 int_to_scsilun(cmd->device->lun, &lun);
353
354 SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
355 SAS_ADDR(dev->sas_addr),
356 cmd->device->lun);
357
358 if (i->dft->lldd_abort_task_set)
359 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
360
361 if (res == TMF_RESP_FUNC_FAILED) {
362 if (i->dft->lldd_clear_task_set)
363 res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
364 }
365
366 if (res == TMF_RESP_FUNC_FAILED) {
367 if (i->dft->lldd_lu_reset)
368 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
369 }
370
371 return res;
372}
373
374static int sas_recover_I_T(struct domain_device *dev)
375{
376 int res = TMF_RESP_FUNC_FAILED;
377 struct sas_internal *i =
378 to_sas_internal(dev->port->ha->core.shost->transportt);
379
380 SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
381 SAS_ADDR(dev->sas_addr));
382
383 if (i->dft->lldd_I_T_nexus_reset)
384 res = i->dft->lldd_I_T_nexus_reset(dev);
385
386 return res;
387}
388
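/*
 * Editorial summary of the recovery ladder implemented below: each
 * timed-out command is first chased directly (sas_scsi_find_task), then
 * recovery escalates through abort/clear task set and LU reset
 * (sas_recover_lu), an I_T nexus reset (sas_recover_I_T), clearing the
 * port nexus, and finally clearing the whole HA nexus.  A success at any
 * rung also retires every other queued command it implicitly recovered,
 * which is why the sas_scsi_clear_queue_*() calls rescan via "goto Again".
 */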
389void sas_scsi_recover_host(struct Scsi_Host *shost)
390{
391 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
392 unsigned long flags;
393 LIST_HEAD(error_q);
394 struct scsi_cmnd *cmd, *n;
395 enum task_disposition res = TASK_IS_DONE;
396 int tmf_resp;
397 struct sas_internal *i = to_sas_internal(shost->transportt);
398
399 spin_lock_irqsave(shost->host_lock, flags);
400 list_splice_init(&shost->eh_cmd_q, &error_q);
401 spin_unlock_irqrestore(shost->host_lock, flags);
402
403 SAS_DPRINTK("Enter %s\n", __FUNCTION__);
404
405 /* All tasks on this list were marked SAS_TASK_STATE_ABORTED
406 * by sas_scsi_timed_out() callback.
407 */
408Again:
409 SAS_DPRINTK("going over list...\n");
410 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
411 struct sas_task *task = TO_SAS_TASK(cmd);
412
413 SAS_DPRINTK("trying to find task 0x%p\n", task);
414 list_del_init(&cmd->eh_entry);
415 res = sas_scsi_find_task(task);
416
417 cmd->eh_eflags = 0;
418 shost->host_failed--;
419
420 switch (res) {
421 case TASK_IS_DONE:
422 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
423 task);
424 task->task_done(task);
425 continue;
426 case TASK_IS_ABORTED:
427 SAS_DPRINTK("%s: task 0x%p is aborted\n",
428 __FUNCTION__, task);
429 task->task_done(task);
430 continue;
431 case TASK_IS_AT_LU:
432 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
433 tmf_resp = sas_recover_lu(task->dev, cmd);
434 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
435 SAS_DPRINTK("dev %016llx LU %x is "
436 "recovered\n",
 437 SAS_ADDR(task->dev->sas_addr),
438 cmd->device->lun);
439 task->task_done(task);
440 sas_scsi_clear_queue_lu(&error_q, cmd);
441 goto Again;
442 }
443 /* fallthrough */
444 case TASK_IS_NOT_AT_LU:
445 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
446 task);
447 tmf_resp = sas_recover_I_T(task->dev);
448 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
449 SAS_DPRINTK("I_T %016llx recovered\n",
450 SAS_ADDR(task->dev->sas_addr));
451 task->task_done(task);
452 sas_scsi_clear_queue_I_T(&error_q, task->dev);
453 goto Again;
454 }
455 /* Hammer time :-) */
456 if (i->dft->lldd_clear_nexus_port) {
457 struct asd_sas_port *port = task->dev->port;
458 SAS_DPRINTK("clearing nexus for port:%d\n",
459 port->id);
460 res = i->dft->lldd_clear_nexus_port(port);
461 if (res == TMF_RESP_FUNC_COMPLETE) {
462 SAS_DPRINTK("clear nexus port:%d "
463 "succeeded\n", port->id);
464 task->task_done(task);
465 sas_scsi_clear_queue_port(&error_q,
466 port);
467 goto Again;
468 }
469 }
470 if (i->dft->lldd_clear_nexus_ha) {
471 SAS_DPRINTK("clear nexus ha\n");
472 res = i->dft->lldd_clear_nexus_ha(ha);
473 if (res == TMF_RESP_FUNC_COMPLETE) {
474 SAS_DPRINTK("clear nexus ha "
475 "succeeded\n");
476 task->task_done(task);
477 goto out;
478 }
479 }
480 /* If we are here -- this means that no amount
481 * of effort could recover from errors. Quite
482 * possibly the HA just disappeared.
483 */
484 SAS_DPRINTK("error from device %llx, LUN %x "
485 "couldn't be recovered in any way\n",
486 SAS_ADDR(task->dev->sas_addr),
487 cmd->device->lun);
488
489 task->task_done(task);
490 goto clear_q;
491 }
492 }
493out:
494 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
495 return;
496clear_q:
497 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
498 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
499 struct sas_task *task = TO_SAS_TASK(cmd);
500 list_del_init(&cmd->eh_entry);
501 task->task_done(task);
502 }
503}
504
505enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
506{
507 struct sas_task *task = TO_SAS_TASK(cmd);
508 unsigned long flags;
509
510 if (!task) {
511 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
512 cmd, task);
513 return EH_HANDLED;
514 }
515
516 spin_lock_irqsave(&task->task_state_lock, flags);
517 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
518 spin_unlock_irqrestore(&task->task_state_lock, flags);
519 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
520 cmd, task);
521 return EH_HANDLED;
522 }
523 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
524 spin_unlock_irqrestore(&task->task_state_lock, flags);
525
526 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
527 cmd, task);
528
529 return EH_NOT_HANDLED;
530}
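/*
 * Editorial note: returning EH_NOT_HANDLED hands the command to the SCSI
 * error-handler thread, which lands in sas_scsi_recover_host() above.
 * The SAS_TASK_STATE_ABORTED flag set here is also what makes
 * sas_scsi_task_done() complete the command via scsi_finish_command()
 * instead of scsi_done().
 */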
531
532struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
533{
534 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
535 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
536 struct domain_device *found_dev = NULL;
537 int i;
538
539 spin_lock(&ha->phy_port_lock);
540 for (i = 0; i < ha->num_phys; i++) {
541 struct asd_sas_port *port = ha->sas_port[i];
542 struct domain_device *dev;
543
544 spin_lock(&port->dev_list_lock);
545 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
546 if (rphy == dev->rphy) {
547 found_dev = dev;
548 spin_unlock(&port->dev_list_lock);
549 goto found;
550 }
551 }
552 spin_unlock(&port->dev_list_lock);
553 }
554 found:
555 spin_unlock(&ha->phy_port_lock);
556
557 return found_dev;
558}
559
560static inline struct domain_device *sas_find_target(struct scsi_target *starget)
561{
562 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
563
564 return sas_find_dev_by_rphy(rphy);
565}
566
567int sas_target_alloc(struct scsi_target *starget)
568{
569 struct domain_device *found_dev = sas_find_target(starget);
570
571 if (!found_dev)
572 return -ENODEV;
573
574 starget->hostdata = found_dev;
575 return 0;
576}
577
578#define SAS_DEF_QD 32
579#define SAS_MAX_QD 64
580
581int sas_slave_configure(struct scsi_device *scsi_dev)
582{
583 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
584 struct sas_ha_struct *sas_ha;
585
586 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
587
588 sas_ha = dev->port->ha;
589
590 sas_read_port_mode_page(scsi_dev);
591
592 if (scsi_dev->tagged_supported) {
593 scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
594 scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
595 } else {
596 SAS_DPRINTK("device %llx, LUN %x doesn't support "
597 "TCQ\n", SAS_ADDR(dev->sas_addr),
598 scsi_dev->lun);
599 scsi_dev->tagged_supported = 0;
600 scsi_set_tag_type(scsi_dev, 0);
601 scsi_deactivate_tcq(scsi_dev, 1);
602 }
603
604 return 0;
605}
606
607void sas_slave_destroy(struct scsi_device *scsi_dev)
608{
609}
610
611int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
612{
613 int res = min(new_depth, SAS_MAX_QD);
614
615 if (scsi_dev->tagged_supported)
616 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
617 res);
618 else {
619 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
620 sas_printk("device %llx LUN %x queue depth changed to 1\n",
621 SAS_ADDR(dev->sas_addr),
622 scsi_dev->lun);
623 scsi_adjust_queue_depth(scsi_dev, 0, 1);
624 res = 1;
625 }
626
627 return res;
628}
629
630int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
631{
632 if (!scsi_dev->tagged_supported)
633 return 0;
634
635 scsi_deactivate_tcq(scsi_dev, 1);
636
637 scsi_set_tag_type(scsi_dev, qt);
638 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
639
640 return qt;
641}
642
643int sas_bios_param(struct scsi_device *scsi_dev,
644 struct block_device *bdev,
645 sector_t capacity, int *hsc)
646{
647 hsc[0] = 255;
648 hsc[1] = 63;
649 sector_div(capacity, 255*63);
650 hsc[2] = capacity;
651
652 return 0;
653}
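/*
 * Worked example (editorial): with the fixed 255-head, 63-sector
 * geometry above, a disk of 16065000 sectors (~8.2 GB) reports exactly
 * 16065000 / (255 * 63) = 1000 cylinders; only hsc[2] varies with
 * capacity.
 */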
654
655/* ---------- Task Collector Thread implementation ---------- */
656
657static void sas_queue(struct sas_ha_struct *sas_ha)
658{
659 struct scsi_core *core = &sas_ha->core;
660 unsigned long flags;
661 LIST_HEAD(q);
662 int can_queue;
663 int res;
664 struct sas_internal *i = to_sas_internal(core->shost->transportt);
665
666 spin_lock_irqsave(&core->task_queue_lock, flags);
667 while (!core->queue_thread_kill &&
668 !list_empty(&core->task_queue)) {
669
670 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
671 if (can_queue >= 0) {
672 can_queue = core->task_queue_size;
673 list_splice_init(&core->task_queue, &q);
674 } else {
675 struct list_head *a, *n;
676
677 can_queue = sas_ha->lldd_queue_size;
678 list_for_each_safe(a, n, &core->task_queue) {
679 list_move_tail(a, &q);
680 if (--can_queue == 0)
681 break;
682 }
683 can_queue = sas_ha->lldd_queue_size;
684 }
685 core->task_queue_size -= can_queue;
686 spin_unlock_irqrestore(&core->task_queue_lock, flags);
687 {
688 struct sas_task *task = list_entry(q.next,
689 struct sas_task,
690 list);
 691 list_del_init(&q); /* detach the local list head; the tasks stay chained for the LLDD */
 692 res = i->dft->lldd_execute_task(task, can_queue,
 693 GFP_KERNEL);
 694 if (unlikely(res))
 695 __list_add(&q, task->list.prev, &task->list); /* put the head back before the unexecuted tasks */
696 }
697 spin_lock_irqsave(&core->task_queue_lock, flags);
698 if (res) {
699 list_splice_init(&q, &core->task_queue); /*at head*/
700 core->task_queue_size += can_queue;
701 }
702 }
703 spin_unlock_irqrestore(&core->task_queue_lock, flags);
704}
705
706static DECLARE_COMPLETION(queue_th_comp);
707
708/**
709 * sas_queue_thread -- The Task Collector thread
710 * @_sas_ha: pointer to struct sas_ha
711 */
712static int sas_queue_thread(void *_sas_ha)
713{
714 struct sas_ha_struct *sas_ha = _sas_ha;
715 struct scsi_core *core = &sas_ha->core;
716
717 daemonize("sas_queue_%d", core->shost->host_no);
718 current->flags |= PF_NOFREEZE;
719
720 complete(&queue_th_comp);
721
722 while (1) {
723 down_interruptible(&core->queue_thread_sema);
724 sas_queue(sas_ha);
725 if (core->queue_thread_kill)
726 break;
727 }
728
729 complete(&queue_th_comp);
730
731 return 0;
732}
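/*
 * Editorial note: the producer side is sas_queue_up(), which links a
 * task into core->task_queue and ups queue_thread_sema; every up() wakes
 * this thread once to drain the queue through sas_queue().  Shutdown
 * reuses the same semaphore: sas_shutdown_queue() sets queue_thread_kill
 * and ups the semaphore so the loop above terminates.
 */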
733
734int sas_init_queue(struct sas_ha_struct *sas_ha)
735{
736 int res;
737 struct scsi_core *core = &sas_ha->core;
738
739 spin_lock_init(&core->task_queue_lock);
740 core->task_queue_size = 0;
741 INIT_LIST_HEAD(&core->task_queue);
742 init_MUTEX_LOCKED(&core->queue_thread_sema);
743
744 res = kernel_thread(sas_queue_thread, sas_ha, 0);
745 if (res >= 0)
746 wait_for_completion(&queue_th_comp);
747
748 return res < 0 ? res : 0;
749}
750
751void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
752{
753 unsigned long flags;
754 struct scsi_core *core = &sas_ha->core;
755 struct sas_task *task, *n;
756
757 init_completion(&queue_th_comp);
758 core->queue_thread_kill = 1;
759 up(&core->queue_thread_sema);
760 wait_for_completion(&queue_th_comp);
761
762 if (!list_empty(&core->task_queue))
763 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
764 SAS_ADDR(sas_ha->sas_addr));
765
766 spin_lock_irqsave(&core->task_queue_lock, flags);
767 list_for_each_entry_safe(task, n, &core->task_queue, list) {
768 struct scsi_cmnd *cmd = task->uldd_task;
769
770 list_del_init(&task->list);
771
772 ASSIGN_SAS_TASK(cmd, NULL);
773 sas_free_task(task);
774 cmd->result = DID_ABORT << 16;
775 cmd->scsi_done(cmd);
776 }
777 spin_unlock_irqrestore(&core->task_queue_lock, flags);
778}
779
780EXPORT_SYMBOL_GPL(sas_queuecommand);
781EXPORT_SYMBOL_GPL(sas_target_alloc);
782EXPORT_SYMBOL_GPL(sas_slave_configure);
783EXPORT_SYMBOL_GPL(sas_slave_destroy);
784EXPORT_SYMBOL_GPL(sas_change_queue_depth);
785EXPORT_SYMBOL_GPL(sas_change_queue_type);
786EXPORT_SYMBOL_GPL(sas_bios_param);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 087c44539a16..3f7f5f8abd75 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -21,10 +21,12 @@
 
 struct lpfc_sli2_slim;
 
-#define LPFC_MAX_TARGET		256	/* max targets supported */
-#define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els req */
-#define LPFC_MAX_NS_RETRY	3	/* max NameServer retries */
 
+#define LPFC_MAX_TARGET		256	/* max number of targets supported */
+#define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
+					   requests */
+#define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
+					   the NameServer before giving up. */
 #define LPFC_DFT_HBA_Q_DEPTH	2048	/* max cmds per hba */
 #define LPFC_LC_HBA_Q_DEPTH	1024	/* max cmds per low cost hba */
 #define LPFC_LP101_HBA_Q_DEPTH	128	/* max cmds per low cost hba */
@@ -41,7 +43,6 @@ struct lpfc_sli2_slim;
 	(( (u64)(high)<<16 ) << 16)|( (u64)(low))))
 /* Provide maximum configuration definitions. */
 #define LPFC_DRVR_TIMEOUT	16	/* driver iocb timeout value in sec */
-#define MAX_FCP_TARGET		256	/* max num of FCP targets supported */
 #define FC_MAX_ADPTMSG		64
 
 #define MAX_HBAEVT	32
@@ -174,7 +175,6 @@ struct lpfc_hba {
 	dma_addr_t slim2p_mapping;
 	uint16_t pci_cfg_value;
 
-	struct semaphore hba_can_block;
 	int32_t hba_state;
 
 #define LPFC_STATE_UNKNOWN	0	/* HBA state is unknown */
@@ -285,6 +285,7 @@ struct lpfc_hba {
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_lun_queue_depth;
 	uint32_t cfg_nodev_tmo;
+	uint32_t cfg_devloss_tmo;
 	uint32_t cfg_hba_queue_depth;
 	uint32_t cfg_fcp_class;
 	uint32_t cfg_use_adisc;
@@ -302,6 +303,9 @@ struct lpfc_hba {
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
+	uint64_t cfg_soft_wwpn;
+
+	uint32_t dev_loss_tmo_changed;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -351,6 +355,8 @@ struct lpfc_hba {
 #define VPD_PORT	0x8	/* valid vpd port data */
 #define VPD_MASK	0xf	/* mask for any vpd data */
 
+	uint8_t soft_wwpn_enable;
+
 	struct timer_list fcp_poll_timer;
 	struct timer_list els_tmofunc;
 
@@ -391,3 +397,5 @@ struct rnidrsp {
 	struct list_head list;
 	uint32_t data;
 };
+
+#define FC_REG_DUMP_EVENT	0x10	/* Register for Dump events */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b62a72dfab29..9496e87c135e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,9 @@
 #include "lpfc_compat.h"
 #include "lpfc_crtn.h"
 
+#define LPFC_DEF_DEVLOSS_TMO 30
+#define LPFC_MIN_DEVLOSS_TMO 1
+#define LPFC_MAX_DEVLOSS_TMO 255
 
 static void
 lpfc_jedec_to_ascii(int incr, char hdw[])
@@ -219,8 +222,18 @@ lpfc_issue_lip(struct Scsi_Host *host)
 		return -ENOMEM;
 
 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-	lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
-	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+	pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->mb.mbxOwner = OWN_HOST;
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
+			       phba->cfg_link_speed);
+		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+						     phba->fc_ratov * 2);
+	}
 
 	if (mbxstatus == MBX_TIMEOUT)
 		pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -233,51 +246,53 @@ lpfc_issue_lip(struct Scsi_Host *host)
 	return 0;
 }
 
-static ssize_t
-lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
+static int
+lpfc_selective_reset(struct lpfc_hba *phba)
 {
-	struct Scsi_Host *host = class_to_shost(cdev);
-	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+	struct completion online_compl;
+	int status = 0;
+
+	init_completion(&online_compl);
+	lpfc_workq_post_event(phba, &status, &online_compl,
+			      LPFC_EVT_OFFLINE);
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	init_completion(&online_compl);
+	lpfc_workq_post_event(phba, &status, &online_compl,
+			      LPFC_EVT_ONLINE);
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
 }
 
 static ssize_t
-lpfc_board_online_show(struct class_device *cdev, char *buf)
+lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
 {
 	struct Scsi_Host *host = class_to_shost(cdev);
 	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	int status = -EINVAL;
 
-	if (phba->fc_flag & FC_OFFLINE_MODE)
-		return snprintf(buf, PAGE_SIZE, "0\n");
+	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
+		status = lpfc_selective_reset(phba);
+
+	if (status == 0)
+		return strlen(buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "1\n");
+		return status;
 }
 
 static ssize_t
-lpfc_board_online_store(struct class_device *cdev, const char *buf,
-			size_t count)
+lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
 {
 	struct Scsi_Host *host = class_to_shost(cdev);
 	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
-	struct completion online_compl;
-	int val=0, status=0;
-
-	if (sscanf(buf, "%d", &val) != 1)
-		return -EINVAL;
-
-	init_completion(&online_compl);
-
-	if (val)
-		lpfc_workq_post_event(phba, &status, &online_compl,
-				      LPFC_EVT_ONLINE);
-	else
-		lpfc_workq_post_event(phba, &status, &online_compl,
-				      LPFC_EVT_OFFLINE);
-	wait_for_completion(&online_compl);
-	if (!status)
-		return strlen(buf);
-	else
-		return -EIO;
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
 }
 
 static ssize_t
@@ -532,10 +547,122 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
 		  NULL);
 static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
 		  NULL);
-static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
-			 lpfc_board_online_show, lpfc_board_online_store);
 static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
 			 lpfc_board_mode_show, lpfc_board_mode_store);
+static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
+
+
+static char *lpfc_soft_wwpn_key = "C99G71SL8032A";
+
+static ssize_t
+lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
+				size_t count)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	unsigned int cnt = count;
+
+	/*
+	 * We're doing a simple sanity check for soft_wwpn setting.
+	 * We require that the user write a specific key to enable
+	 * the soft_wwpn attribute to be settable. Once the attribute
+	 * is written, the enable key resets. If further updates are
+	 * desired, the key must be written again to re-enable the
+	 * attribute.
+	 *
+	 * The "key" is not secret - it is a hardcoded string shown
+	 * here. The intent is to protect against the random user or
+	 * application that is just writing attributes.
+	 */
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt != strlen(lpfc_soft_wwpn_key)) ||
+	    (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0))
+		return -EINVAL;
+
+	phba->soft_wwpn_enable = 1;
+	return count;
+}
+static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL,
+			 lpfc_soft_wwpn_enable_store);
+
+static ssize_t
+lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n", phba->cfg_soft_wwpn);
+}
+
+
+static ssize_t
+lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	struct completion online_compl;
+	int stat1=0, stat2=0;
+	unsigned int i, j, cnt=count;
+	u8 wwpn[8];
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) ||
+	    ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	phba->soft_wwpn_enable = 0;
+
+	memset(wwpn, 0, sizeof(wwpn));
+
+	/* Validate and store the new name */
+	for (i=0, j=0; i < 16; i++) {
+		if ((*buf >= 'a') && (*buf <= 'f'))
+			j = ((j << 4) | ((*buf++ -'a') + 10));
+		else if ((*buf >= 'A') && (*buf <= 'F'))
+			j = ((j << 4) | ((*buf++ -'A') + 10));
+		else if ((*buf >= '0') && (*buf <= '9'))
+			j = ((j << 4) | (*buf++ -'0'));
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwpn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
+	fc_host_port_name(host) = phba->cfg_soft_wwpn;
+
+	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+		   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
+
+	init_completion(&online_compl);
+	lpfc_workq_post_event(phba, &stat1, &online_compl, LPFC_EVT_OFFLINE);
+	wait_for_completion(&online_compl);
+	if (stat1)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0463 lpfc_soft_wwpn attribute set failed to reinit "
+			"adapter - %d\n", phba->brd_no, stat1);
+
+	init_completion(&online_compl);
+	lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
+	wait_for_completion(&online_compl);
+	if (stat2)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0464 lpfc_soft_wwpn attribute set failed to reinit "
+			"adapter - %d\n", phba->brd_no, stat2);
+
+	return (stat1 || stat2) ? -EIO : count;
+}
+static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+			 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
+
 
 static int lpfc_poll = 0;
 module_param(lpfc_poll, int, 0);
@@ -548,6 +675,123 @@ static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
 		 lpfc_poll_show, lpfc_poll_store);
 
 /*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+*/
+static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
+module_param(lpfc_nodev_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_nodev_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+static ssize_t
+lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	int val = 0;
+	val = phba->cfg_devloss_tmo;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			phba->cfg_devloss_tmo);
+}
+
+static int
+lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
+{
+	static int warned;
+	if (phba->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = phba->cfg_devloss_tmo;
+		if (!warned && val != LPFC_DEF_DEVLOSS_TMO) {
+			warned = 1;
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"%d:0402 Ignoring nodev_tmo module "
+					"parameter because devloss_tmo is"
+					" set.\n",
+					phba->brd_no);
+		}
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = val;
+		phba->cfg_devloss_tmo = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0400 lpfc_nodev_tmo attribute cannot be set to %d, "
+			"allowed range is [%d, %d]\n",
+			phba->brd_no, val,
+			LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	phba->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+	return -EINVAL;
+}
+
+static int
+lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
+{
+	if (phba->dev_loss_tmo_changed ||
+		(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"%d:0401 Ignoring change to nodev_tmo "
+				"because devloss_tmo is set.\n",
+				phba->brd_no);
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = val;
+		phba->cfg_devloss_tmo = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0403 lpfc_nodev_tmo attribute cannot be set to %d, "
+			"allowed range is [%d, %d]\n",
+			phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
+			LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_param_store(nodev_tmo)
+
+static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
+			 lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+
+/*
+# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+# disappear until the timer expires. Value range is [0,255]. Default
+# value is 30.
+*/
+module_param(lpfc_devloss_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_devloss_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+lpfc_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
+		LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
+lpfc_param_show(devloss_tmo)
+static int
+lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
+{
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = val;
+		phba->cfg_devloss_tmo = val;
+		phba->dev_loss_tmo_changed = 1;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0404 lpfc_devloss_tmo attribute cannot be set to"
+			" %d, allowed range is [%d, %d]\n",
+			phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
+			LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_param_store(devloss_tmo)
+static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
+			 lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
+
+/*
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
@@ -606,14 +850,6 @@ LPFC_ATTR_R(scan_down, 1, 0, 1,
 	     "Start scanning for devices from highest ALPA to lowest");
 
 /*
-# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
-# until the timer expires. Value range is [0,255]. Default value is 30.
-# NOTE: this MUST be less then the SCSI Layer command timeout - 1.
-*/
-LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
-	     "Seconds driver will hold I/O waiting for a device to come back");
-
-/*
 # lpfc_topology:  link topology for init link
 #            0x0  = attempt loop mode then point-to-point
 #            0x01 = internal loopback mode
@@ -695,12 +931,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
 		 "during discovery");
 
 /*
-# lpfc_max_luns: maximum number of LUNs per target driver will support
-# Value range is [1,32768]. Default value is 256.
-# NOTE: The SCSI layer will scan each target for this many luns
+# lpfc_max_luns: maximum allowed LUN.
+# Value range is [0,65535]. Default value is 255.
+# NOTE: The SCSI layer might probe all allowed LUN on some old targets.
 */
-LPFC_ATTR_R(max_luns, 256, 1, 32768,
-	     "Maximum number of LUNs per target driver will support");
+LPFC_ATTR_R(max_luns, 255, 0, 65535,
+	     "Maximum allowed LUN");
 
 /*
 # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
@@ -709,6 +945,7 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 	     "Milliseconds driver will wait between polling FCP ring");
 
+
 struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_info,
 	&class_device_attr_serialnum,
@@ -726,6 +963,7 @@ struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_lpfc_lun_queue_depth,
 	&class_device_attr_lpfc_hba_queue_depth,
 	&class_device_attr_lpfc_nodev_tmo,
+	&class_device_attr_lpfc_devloss_tmo,
 	&class_device_attr_lpfc_fcp_class,
 	&class_device_attr_lpfc_use_adisc,
 	&class_device_attr_lpfc_ack0,
@@ -739,10 +977,12 @@ struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_lpfc_max_luns,
 	&class_device_attr_nport_evt_cnt,
 	&class_device_attr_management_version,
-	&class_device_attr_board_online,
 	&class_device_attr_board_mode,
+	&class_device_attr_issue_reset,
 	&class_device_attr_lpfc_poll,
 	&class_device_attr_lpfc_poll_tmo,
+	&class_device_attr_lpfc_soft_wwpn,
+	&class_device_attr_lpfc_soft_wwpn_enable,
 	NULL,
 };
 
@@ -873,7 +1113,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	    phba->sysfs_mbox.mbox == NULL ) {
 		sysfs_mbox_idle(phba);
 		spin_unlock_irq(host->host_lock);
-		return -EINVAL;
+		return -EAGAIN;
 	}
 	}
 
@@ -989,14 +1229,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		spin_unlock_irq(phba->host->host_lock);
 		rc = lpfc_sli_issue_mbox_wait (phba,
 					       phba->sysfs_mbox.mbox,
-					       phba->fc_ratov * 2);
+					       lpfc_mbox_tmo_val(phba,
+				    phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
 		spin_lock_irq(phba->host->host_lock);
 	}
 
 	if (rc != MBX_SUCCESS) {
 		sysfs_mbox_idle(phba);
 		spin_unlock_irq(host->host_lock);
-		return -ENODEV;
+		return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
 	}
 	phba->sysfs_mbox.state = SMBOX_READING;
 	}
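
sysfs_mbox_read() now scales its wait per mailbox command through lpfc_mbox_tmo_val() (prototype added to lpfc_crtn.h below) instead of the flat fc_ratov * 2, and a timed-out mailbox is reported as -ETIME rather than folded into -ENODEV. The helper's body is not part of this diff; a plausible shape, offered only as a guess, is a switch that grants flash-related commands a larger budget:

	/* Hypothetical shape of lpfc_mbox_tmo_val(); the real implementation
	 * is elsewhere in the driver and not shown in this diff.  Returns
	 * seconds; the caller above multiplies by HZ. */
	int lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
	{
		switch (cmd) {
		case MBX_WRITE_NV:	/* flash-backed commands are slow */
		case MBX_UPDATE_CFG:
		case MBX_DOWN_LOAD:
		case MBX_LOAD_SM:
			return 60;
		default:
			return 30;
		}
	}
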
@@ -1005,7 +1246,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		printk(KERN_WARNING "mbox_read: Bad State\n");
 		sysfs_mbox_idle(phba);
 		spin_unlock_irq(host->host_lock);
-		return -EINVAL;
+		return -EAGAIN;
 	}
 
 	memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
@@ -1192,6 +1433,15 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
 	fc_host_fabric_name(shost) = node_name;
 }
 
+static void
+lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+
+	spin_lock_irq(shost->host_lock);
+	lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
+	spin_unlock_irq(shost->host_lock);
+}
 
 static struct fc_host_statistics *
 lpfc_get_stats(struct Scsi_Host *shost)
@@ -1199,8 +1449,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
 	struct lpfc_sli *psli = &phba->sli;
 	struct fc_host_statistics *hs = &phba->link_stats;
+	struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
+	unsigned long seconds;
 	int rc = 0;
 
 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1261,22 +1513,103 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	hs->error_frames = pmb->un.varRdLnk.crcCnt;
 
+	hs->link_failure_count -= lso->link_failure_count;
+	hs->loss_of_sync_count -= lso->loss_of_sync_count;
+	hs->loss_of_signal_count -= lso->loss_of_signal_count;
+	hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+	hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+	hs->invalid_crc_count -= lso->invalid_crc_count;
+	hs->error_frames -= lso->error_frames;
+
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
+		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
 	} else {
 		hs->lip_count = -1;
 		hs->nos_count = (phba->fc_eventTag >> 1);
+		hs->nos_count -= lso->link_events;
 	}
 
 	hs->dumped_frames = -1;
 
-/* FIX ME */
-	/*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
+	seconds = get_seconds();
+	if (seconds < psli->stats_start)
+		hs->seconds_since_last_reset = seconds +
+				((unsigned long)-1 - psli->stats_start);
+	else
+		hs->seconds_since_last_reset = seconds - psli->stats_start;
 
 	return hs;
 }
 
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	int rc = 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return;
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->mb;
+	pmb->mbxCommand = MBX_READ_STATUS;
+	pmb->mbxOwner = OWN_HOST;
+	pmb->un.varWords[0] = 0x1; /* reset request */
+	pmboxq->context1 = NULL;
+
+	if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc == MBX_TIMEOUT)
+			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		else
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmb->mbxCommand = MBX_READ_LNK_STAT;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+
+	if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc == MBX_TIMEOUT)
+			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		else
+			mempool_free( pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+	lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+	lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+	lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+	lso->error_frames = pmb->un.varRdLnk.crcCnt;
+	lso->link_events = (phba->fc_eventTag >> 1);
+
+	psli->stats_start = get_seconds();
+
+	return;
+}
 
 /*
  * The LPFC driver treats linkdown handling as target loss events so there
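
lpfc_reset_stats() cannot zero counters that live in firmware, so it snapshots them into psli->lnk_stat_offsets and lpfc_get_stats() subtracts that baseline on every read; seconds_since_last_reset gets the same treatment, with an explicit guard for get_seconds() wrapping past ULONG_MAX. The technique in a self-contained form:

	#include <stdio.h>

	/* Baseline-offset reset: monotonically growing hardware counters are
	 * "reset" by recording their current value and reporting the delta,
	 * mirroring hs->...count -= lso->...count above. */
	struct cnt { unsigned long link_failures; };

	static struct cnt hw = { 42 };	/* stand-in for firmware totals */
	static struct cnt base;

	static void reset_stats(void)
	{
		base = hw;		/* snapshot instead of clearing */
	}

	int main(void)
	{
		reset_stats();
		hw.link_failures += 3;	/* failures after the reset */
		printf("since reset: %lu\n",
		       hw.link_failures - base.link_failures);
		return 0;
	}
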
@@ -1346,27 +1679,12 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
 }
 
 static void
-lpfc_get_rport_loss_tmo(struct fc_rport *rport)
-{
-	/*
-	 * Return the driver's global value for device loss timeout plus
-	 * five seconds to allow the driver's nodev timer to run.
-	 */
-	rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
-}
-
-static void
 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
-	/*
-	 * The driver doesn't have a per-target timeout setting.  Set
-	 * this value globally. lpfc_nodev_tmo should be greater then 0.
-	 */
 	if (timeout)
-		lpfc_nodev_tmo = timeout;
+		rport->dev_loss_tmo = timeout;
 	else
-		lpfc_nodev_tmo = 1;
-	rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
+		rport->dev_loss_tmo = 1;
 }
 
 
@@ -1391,7 +1709,6 @@ struct fc_function_template lpfc_transport_functions = {
 	.show_host_port_name = 1,
 	.show_host_supported_classes = 1,
 	.show_host_supported_fc4s = 1,
-	.show_host_symbolic_name = 1,
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
 
@@ -1414,20 +1731,21 @@ struct fc_function_template lpfc_transport_functions = {
 	.get_host_fabric_name = lpfc_get_host_fabric_name,
 	.show_host_fabric_name = 1,
 
+	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
+	.show_host_symbolic_name = 1,
+
 	/*
 	 * The LPFC driver treats linkdown handling as target loss events
 	 * so there are no sysfs handlers for link_down_tmo.
 	 */
 
 	.get_fc_host_stats = lpfc_get_stats,
-
-	/* the LPFC driver doesn't support resetting stats yet */
+	.reset_fc_host_stats = lpfc_reset_stats,
 
 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
 
-	.get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
 	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
 	.show_rport_dev_loss_tmo = 1,
 
@@ -1441,6 +1759,8 @@ struct fc_function_template lpfc_transport_functions = {
 	.show_starget_port_name = 1,
 
 	.issue_fc_host_lip = lpfc_issue_lip,
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
 };
 
 void
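
The template now delegates device-loss handling to the FC transport class: it calls dev_loss_tmo_callbk when an rport's timer expires and terminate_rport_io when outstanding I/O to the lost port must be aborted; both driver implementations appear in the lpfc_hbadisc.c hunks later in this patch. Schematically, an FC LLD wires the pair up like this (stub bodies are illustrative only):

	#include <scsi/scsi_transport_fc.h>

	/* Illustrative stubs; the real callbacks are
	 * lpfc_dev_loss_tmo_callbk() and lpfc_terminate_rport_io(). */
	static void demo_dev_loss_tmo_callbk(struct fc_rport *rport)
	{
		/* drop the driver's references to the dead rport */
	}

	static void demo_terminate_rport_io(struct fc_rport *rport)
	{
		/* abort I/O still queued to the lost target */
	}

	static struct fc_function_template demo_transport_functions = {
		.show_rport_dev_loss_tmo = 1,
		.dev_loss_tmo_callbk	 = demo_dev_loss_tmo_callbk,
		.terminate_rport_io	 = demo_terminate_rport_io,
	};
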
@@ -1456,14 +1776,15 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_ack0_init(phba, lpfc_ack0);
 	lpfc_topology_init(phba, lpfc_topology);
 	lpfc_scan_down_init(phba, lpfc_scan_down);
-	lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
 	lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
 	lpfc_max_luns_init(phba, lpfc_max_luns);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
-
+	lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
+	lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
 	phba->cfg_poll = lpfc_poll;
+	phba->cfg_soft_wwpn = 0L;
 
 	/*
 	 * The total number of segments is the configuration value plus 2
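
cfg_soft_wwpn defaults to 0, which later code treats as "not configured": lpfc_config_port_post() and lpfc_mbx_cmpl_read_sparam() (both further down in this patch) only overwrite the factory WWPN when the value is non-zero. u64_to_wwn() is the transport helper that unpacks the 64-bit value MSB-first into the 8-byte wire format; its effect, as a small demo:

	#include <stdio.h>

	/* Demo of the MSB-first unpacking u64_to_wwn() performs. */
	static void u64_to_wwn_demo(unsigned long long inm, unsigned char wwn[8])
	{
		int i;

		for (i = 0; i < 8; i++)
			wwn[i] = (inm >> (56 - i * 8)) & 0xff;
	}

	int main(void)
	{
		unsigned char wwn[8];
		int i;

		u64_to_wwn_demo(0x10000000c9123456ULL, wwn);	/* sample value */
		for (i = 0; i < 8; i++)
			printf("%02x%c", wwn[i], i < 7 ? ':' : '\n');
		return 0;
	}
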
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee22173fce43..3d684496acde 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,6 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
+struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
@@ -127,6 +128,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
 
 int lpfc_mem_alloc(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -147,6 +149,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
 int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -198,6 +201,8 @@ extern struct scsi_host_template lpfc_template;
 extern struct fc_function_template lpfc_transport_functions;
 
 void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+void lpfc_terminate_rport_io(struct fc_rport *);
+void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
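
The lone `struct fc_rport;` at the top of this header is a forward declaration: the two new prototypes at the bottom only pass pointers, and a pointer to an incomplete type is enough, so lpfc_crtn.h avoids dragging scsi_transport_fc.h into every translation unit that includes it. The idiom in isolation:

	/* A prototype needs only the type's name, not its layout; only code
	 * that dereferences the pointer must see the full definition. */
	struct fc_rport;
	void lpfc_terminate_rport_io(struct fc_rport *);
	void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
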
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57af53e..ae4106458991 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
 	}
 
 ct_unsol_event_exit_piocbq:
+	list_del(&head);
 	if (pmbuf) {
 		list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
 			lpfc_mbuf_free(phba, matp->virt, matp->phys);
@@ -323,7 +324,6 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
 	struct lpfc_sli_ct_request *Response =
 		(struct lpfc_sli_ct_request *) mp->virt;
 	struct lpfc_nodelist *ndlp = NULL;
-	struct lpfc_nodelist *next_ndlp;
 	struct lpfc_dmabuf *mlast, *next_mp;
 	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
 	uint32_t Did;
@@ -398,30 +398,6 @@ nsout1:
 	 * current driver state.
 	 */
 	if (phba->hba_state == LPFC_HBA_READY) {
-
-		/*
-		 * Switch ports that connect a loop of multiple targets need
-		 * special consideration.  The driver wants to unregister the
-		 * rpi only on the target that was pulled from the loop.  On
-		 * RSCN, the driver wants to rediscover an NPort only if the
-		 * driver flagged it as NLP_NPR_2B_DISC.  Provided adisc is
-		 * not enabled and the NPort is not capable of retransmissions
-		 * (FC Tape) prevent timing races with the scsi error handler by
-		 * unregistering the Nport's RPI.  This action causes all
-		 * outstanding IO to flush back to the midlayer.
-		 */
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-					 nlp_listp) {
-			if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-			    (lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
-				if ((phba->cfg_use_adisc == 0) &&
-				    !(ndlp->nlp_fcp_info &
-				      NLP_FCP_2_DEVICE)) {
-					lpfc_unreg_rpi(phba, ndlp);
-					ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-				}
-			}
-		}
 		lpfc_els_flush_rscn(phba);
 		spin_lock_irq(phba->host->host_lock);
 		phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
@@ -481,7 +457,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		if (CTrsp->CommandResponse.bits.CmdRsp ==
 		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-					"%d:0239 NameServer Rsp "
+					"%d:0208 NameServer Rsp "
 					"Data: x%x\n",
 					phba->brd_no,
 					phba->fc_flag);
@@ -588,13 +564,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
 
 	lpfc_decode_firmware_rev(phba, fwrev, 0);
 
-	if (phba->Port[0]) {
-		sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
-			phba->Port, fwrev, lpfc_release_version);
-	} else {
-		sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
-			fwrev, lpfc_release_version);
-	}
+	sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
+		fwrev, lpfc_release_version);
+	return;
 }
 
 /*
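
The list_del(&head) added to lpfc_ct_unsol_event() matters because `head` is an on-stack list_head spliced into the chain of received iocbs; without unlinking it before the function returns, the remaining list nodes would keep pointing into a dead stack frame. The pattern, as a kernel-style sketch (simplified from the function, which is mostly outside this hunk):

	/* Sketch of the on-stack list usage the fix addresses. */
	struct list_head head;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);	/* splice into the iocb chain */
	/* ... walk the chain and consume each buffer ... */
	list_del(&head);			/* detach the stack node again */
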
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 41cf5d3ea6ce..9766f909c9c6 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -30,7 +30,6 @@
 
 /* worker thread events */
 enum lpfc_work_type {
-	LPFC_EVT_NODEV_TMO,
 	LPFC_EVT_ONLINE,
 	LPFC_EVT_OFFLINE,
 	LPFC_EVT_WARM_START,
@@ -74,11 +73,9 @@ struct lpfc_nodelist {
 #define NLP_FCP_2_DEVICE   0x10	/* FCP-2 device */
 
 	struct timer_list   nlp_delayfunc;	/* Used for delayed ELS cmds */
-	struct timer_list   nlp_tmofunc;	/* Used for nodev tmo */
 	struct fc_rport *rport;			/* Corresponding FC transport
 						   port structure */
 	struct lpfc_hba *nlp_phba;
-	struct lpfc_work_evt nodev_timeout_evt;
 	struct lpfc_work_evt els_retry_evt;
 	unsigned long last_ramp_up_time;        /* jiffy of last ramp up */
 	unsigned long last_q_full_time;		/* jiffy of last queue full */
@@ -102,7 +99,6 @@ struct lpfc_nodelist {
 #define NLP_LOGO_SND       0x100	/* sent LOGO request for this entry */
 #define NLP_RNID_SND       0x400	/* sent RNID request for this entry */
 #define NLP_ELS_SND_MASK   0x7e0	/* sent ELS request for this entry */
-#define NLP_NODEV_TMO      0x10000	/* nodev timeout is running for node */
 #define NLP_DELAY_TMO      0x20000	/* delay timeout is running for node */
 #define NLP_NPR_2B_DISC    0x40000	/* node is included in num_disc_nodes */
 #define NLP_RCV_PLOGI      0x80000	/* Rcv'ed PLOGI from remote system */
@@ -169,7 +165,7 @@ struct lpfc_nodelist {
  */
 /*
  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
- * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
  * expire, all effected nodes will receive a DEVICE_RM event.
 */
 /*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 283b7d824c34..71864cdc6c71 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba)
 }
 
 static struct lpfc_nodelist *
-lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
 			 struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_nodelist *new_ndlp;
-	struct lpfc_dmabuf *pcmd, *prsp;
 	uint32_t *lp;
 	struct serv_parm *sp;
 	uint8_t name[sizeof (struct lpfc_name)];
 	uint32_t rc;
 
-	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
 	lp = (uint32_t *) prsp->virt;
 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+	memset(name, 0, sizeof (struct lpfc_name));
 
 	/* Now we to find out if the NPort we are logging into, matches the WWPN
 	 * we have for that ndlp. If not, we have some work to do.
 	 */
 	new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
 
-	memset(name, 0, sizeof (struct lpfc_name));
-	rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
-	if (!rc || (new_ndlp == ndlp)) {
+	if (new_ndlp == ndlp)
 		return ndlp;
-	}
 
 	if (!new_ndlp) {
+		rc =
+		memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
+		if (!rc)
+			return ndlp;
 		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
 		if (!new_ndlp)
 			return ndlp;
@@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	}
 
 	lpfc_unreg_rpi(phba, new_ndlp);
-	new_ndlp->nlp_prev_state = ndlp->nlp_state;
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
-	new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-	lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST);
+	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+	new_ndlp->nlp_state = ndlp->nlp_state;
+	lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
 
 	/* Move this back to NPR list */
-	lpfc_unreg_rpi(phba, ndlp);
-	ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
-	ndlp->nlp_state = NLP_STE_NPR_NODE;
-	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-
+	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
+		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+	}
+	else {
+		lpfc_unreg_rpi(phba, ndlp);
+		ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
+		ndlp->nlp_state = NLP_STE_NPR_NODE;
+		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+	}
 	return new_ndlp;
 }
 
@@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 {
 	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *prsp;
 	int disc, rc, did, type;
 
 
@@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		}
 	} else {
 		/* Good status, call state machine */
-		ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp);
+		prsp = list_entry(((struct lpfc_dmabuf *)
+				   cmdiocb->context2)->list.next,
+				  struct lpfc_dmabuf, list);
+		ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
 		rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
 					     NLP_EVT_CMPL_PLOGI);
 	}
@@ -821,7 +828,7 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
 	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
 	cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, 0, did,
+	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did,
				     ELS_CMD_PLOGI);
 	if (!elsiocb)
 		return 1;
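
lpfc_cmpl_els_plogi() now digs the response buffer out of the command iocb itself: context2 holds the command dmabuf and the response dmabuf follows it on the same list, so list_entry() (container_of() for list nodes) converts the `list.next` pointer back into the enclosing structure. The pointer arithmetic in a self-contained miniature:

	#include <stddef.h>
	#include <stdio.h>

	/* Minimal list_entry(): subtract the member offset to recover the
	 * containing struct, exactly what the hunk above relies on. */
	struct list_head { struct list_head *next, *prev; };

	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct dmabuf {
		const char *virt;
		struct list_head list;
	};

	int main(void)
	{
		struct dmabuf cmd, rsp = { "response payload" };
		struct dmabuf *prsp;

		cmd.list.next = &rsp.list;	/* response chained after command */
		prsp = list_entry(cmd.list.next, struct dmabuf, list);
		printf("%s\n", prsp->virt);
		return 0;
	}
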
@@ -1841,9 +1848,12 @@ static void
 lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		  struct lpfc_iocbq * rspiocb)
 {
+	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
 	LPFC_MBOXQ_t *mbox = NULL;
 
+	irsp = &rspiocb->iocb;
+
 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 	if (cmdiocb->context_un.mbox)
 		mbox = cmdiocb->context_un.mbox;
@@ -1886,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 			mempool_free( mbox, phba->mbox_mem_pool);
 		} else {
 			mempool_free( mbox, phba->mbox_mem_pool);
-			if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
-				lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-				ndlp = NULL;
+			/* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
+			if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			      ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+			       (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
+			       (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
+				if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+					lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+					ndlp = NULL;
+				}
 			}
 		}
 	}
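
The widened test in lpfc_cmpl_els_acc() keeps the node off NLP_NO_LIST when the ACC was killed by lpfc_els_abort(): IOSTAT_LOCAL_REJECT with SLI_ABORTED, LINK_DOWN or SLI_DOWN in word 4 means teardown is already in progress elsewhere, and the completion must not free the node a second time. The guard reads more easily as a predicate (a refactoring sketch, not code from this patch):

	/* Sketch: was this ELS completion the result of an internal abort?
	 * The IOSTAT_/IOERR_ constants are the driver's own definitions. */
	static int els_acc_was_aborted(IOCB_t *irsp)
	{
		return (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		       ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
			(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
			(irsp->un.ulpWord[4] == IOERR_SLI_DOWN));
	}
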
@@ -2490,6 +2506,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
 	uint32_t *lp;
 	IOCB_t *icmd;
 	uint32_t payload_len, cmd;
+	int i;
 
 	icmd = &cmdiocb->iocb;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2508,6 +2525,10 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
 		phba->brd_no,
 		phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
 
+	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
+		fc_host_post_event(phba->host, fc_get_event_number(),
+			FCH_EVT_RSCN, lp[i]);
+
 	/* If we are about to begin discovery, just ACC the RSCN.
 	 * Discovery processing will satisfy it.
 	 */
@@ -2791,8 +2812,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	xri = (uint16_t) ((unsigned long)(pmb->context1));
-	pmb->context1 = 0;
-	pmb->context2 = 0;
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
 
 	if (mb->mbxStatus) {
 		mempool_free( pmb, phba->mbox_mem_pool);
@@ -2832,7 +2853,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
 	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0128 Xmit ELS RPS ACC response tag x%x "
+			"%d:0118 Xmit ELS RPS ACC response tag x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
 			phba->brd_no,
 			elsiocb->iocb.ulpIoTag,
@@ -2941,7 +2962,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
 
 	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0128 Xmit ELS RPL ACC response tag x%x "
+			"%d:0120 Xmit ELS RPL ACC response tag x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
 			phba->brd_no,
 			elsiocb->iocb.ulpIoTag,
@@ -3102,7 +3123,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	struct lpfc_nodelist *ndlp, *next_ndlp;
 
 	/* FAN received */
-	lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n",
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
 			phba->brd_no);
 
 	icmd = &cmdiocb->iocb;
@@ -3282,10 +3303,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
 		} else
 			lpfc_sli_release_iocbq(phba, piocb);
 	}
-	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
-		phba->els_tmofunc.expires = jiffies + HZ * timeout;
-		add_timer(&phba->els_tmofunc);
-	}
+	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+		mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+
 	spin_unlock_irq(phba->host->host_lock);
 }
 
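
The els_tmofunc change is a correctness fix as much as a cleanup: writing `expires` and calling add_timer() on a timer that might still be pending corrupts the timer wheel, while mod_timer() atomically re-arms whether or not the timer is active. The 2.6-era API contrast, sketched:

	/* add_timer() requires an inactive timer:
	 *	timer->expires = jiffies + HZ * timeout;
	 *	add_timer(timer);
	 * mod_timer() has no such precondition and replaces both lines: */
	mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
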
@@ -3442,6 +3462,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
 			ndlp->nlp_type |= NLP_FABRIC;
 		}
+		ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+		lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
 	}
 
 	phba->fc_stat.elsRcvFrame++;
@@ -3463,13 +3485,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
 			rjt_err = 1;
 			break;
 		}
+		ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
 		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
 		break;
 	case ELS_CMD_FLOGI:
 		phba->fc_stat.elsRcvFLOGI++;
 		lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	case ELS_CMD_LOGO:
@@ -3492,7 +3515,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
 		phba->fc_stat.elsRcvRSCN++;
 		lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	case ELS_CMD_ADISC:
@@ -3535,28 +3558,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
 		phba->fc_stat.elsRcvLIRR++;
 		lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	case ELS_CMD_RPS:
 		phba->fc_stat.elsRcvRPS++;
 		lpfc_els_rcv_rps(phba, elsiocb, ndlp);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	case ELS_CMD_RPL:
 		phba->fc_stat.elsRcvRPL++;
 		lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	case ELS_CMD_RNID:
 		phba->fc_stat.elsRcvRNID++;
 		lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	default:
@@ -3568,7 +3591,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
 			"%d:0115 Unknown ELS command x%x received from "
 			"NPORT x%x\n", phba->brd_no, cmd, did);
 		if (newnode) {
-			mempool_free( ndlp, phba->nlp_mem_pool);
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		}
 		break;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index adb086009ae0..d586c3d3b0d0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -56,28 +56,63 @@ static uint8_t lpfcAlpaArray[] = {
 
 static void lpfc_disc_timeout_handler(struct lpfc_hba *);
 
-static void
-lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
 {
-	uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
-	int warn_on = 0;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_hba *phba;
 
-	spin_lock_irq(phba->host->host_lock);
-	if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
-		spin_unlock_irq(phba->host->host_lock);
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+
+	if (!ndlp) {
+		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+			printk(KERN_ERR "Cannot find remote node"
+			" to terminate I/O Data x%x\n",
+			rport->port_id);
 		return;
 	}
 
-	/*
-	 * If a discovery event readded nodev_timer after timer
-	 * firing and before processing the timer, cancel the
-	 * nlp_tmofunc.
-	 */
-	spin_unlock_irq(phba->host->host_lock);
-	del_timer_sync(&ndlp->nlp_tmofunc);
+	phba = ndlp->nlp_phba;
+
 	spin_lock_irq(phba->host->host_lock);
+	if (ndlp->nlp_sid != NLP_NO_SID) {
+		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+	}
+	spin_unlock_irq(phba->host->host_lock);
 
-	ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+	return;
+}
+
+/*
+ * This function will be called when dev_loss_tmo fire.
+ */
+void
+lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	uint8_t *name;
+	int warn_on = 0;
+	struct lpfc_hba *phba;
+
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+
+	if (!ndlp) {
+		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+			printk(KERN_ERR "Cannot find remote node"
+			" for rport in dev_loss_tmo_callbk x%x\n",
+			rport->port_id);
+		return;
+	}
+
+	name = (uint8_t *)&ndlp->nlp_portname;
+	phba = ndlp->nlp_phba;
+
+	spin_lock_irq(phba->host->host_lock);
 
 	if (ndlp->nlp_sid != NLP_NO_SID) {
 		warn_on = 1;
@@ -85,11 +120,14 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
 			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
 	}
+	if (phba->fc_flag & FC_UNLOADING)
+		warn_on = 0;
+
 	spin_unlock_irq(phba->host->host_lock);
 
 	if (warn_on) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"%d:0203 Nodev timeout on "
+				"%d:0203 Devloss timeout on "
 				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 				"NPort x%x Data: x%x x%x x%x\n",
 				phba->brd_no,
@@ -99,7 +137,7 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 				ndlp->nlp_state, ndlp->nlp_rpi);
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-				"%d:0204 Nodev timeout on "
+				"%d:0204 Devloss timeout on "
 				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 				"NPort x%x Data: x%x x%x x%x\n",
 				phba->brd_no,
@@ -109,7 +147,12 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 				ndlp->nlp_state, ndlp->nlp_rpi);
 	}
 
-	lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+	ndlp->rport = NULL;
+	rdata->pnode = NULL;
+
+	if (!(phba->fc_flag & FC_UNLOADING))
+		lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
 	return;
 }
 
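
Both new functions recover the driver's node from the rport through the transport's per-rport private area: `.dd_fcrport_size = sizeof(struct lpfc_rport_data)` in the template makes the FC transport allocate that many bytes behind every fc_rport, and `dd_data` points at them. The round trip, schematically (a sketch, not patch code):

	/* dd_data round trip: fc_rport -> driver blob -> lpfc node. */
	static struct lpfc_nodelist *rport_to_ndlp(struct fc_rport *rport)
	{
		struct lpfc_rport_data *rdata = rport->dd_data;

		return rdata ? rdata->pnode : NULL;	/* NULL after teardown */
	}

Note also that lpfc_dev_loss_tmo_callbk() severs the link in both directions (ndlp->rport and rdata->pnode) before conditionally pushing NLP_EVT_DEVICE_RM through the state machine, so neither side can reach the other once the timer has fired.
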
@@ -127,11 +170,6 @@ lpfc_work_list_done(struct lpfc_hba * phba)
 		spin_unlock_irq(phba->host->host_lock);
 		free_evt = 1;
 		switch (evtp->evt) {
-		case LPFC_EVT_NODEV_TMO:
-			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-			lpfc_process_nodev_timeout(phba, ndlp);
-			free_evt = 0;
-			break;
 		case LPFC_EVT_ELS_RETRY:
 			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
 			lpfc_els_retry_delay_handler(ndlp);
@@ -340,6 +378,9 @@ lpfc_linkdown(struct lpfc_hba * phba)
 		spin_unlock_irq(phba->host->host_lock);
 	}
 
+	fc_host_post_event(phba->host, fc_get_event_number(),
+		FCH_EVT_LINKDOWN, 0);
+
 	/* Clean up any firmware default rpi's */
 	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
 		lpfc_unreg_did(phba, 0xffffffff, mb);
@@ -374,16 +415,6 @@ lpfc_linkdown(struct lpfc_hba * phba)
 			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
 					     NLP_EVT_DEVICE_RECOVERY);
 
-			/* Check config parameter use-adisc or FCP-2 */
-			if ((rc != NLP_STE_FREED_NODE) &&
-			    (phba->cfg_use_adisc == 0) &&
-			    !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
-				/* We know we will have to relogin, so
-				 * unreglogin the rpi right now to fail
-				 * any outstanding I/Os quickly.
-				 */
-				lpfc_unreg_rpi(phba, ndlp);
-			}
 		}
 	}
 
@@ -427,6 +458,9 @@ lpfc_linkup(struct lpfc_hba * phba)
 	struct list_head *listp, *node_list[7];
 	int i;
 
+	fc_host_post_event(phba->host, fc_get_event_number(),
+		FCH_EVT_LINKUP, 0);
+
 	spin_lock_irq(phba->host->host_lock);
 	phba->hba_state = LPFC_LINK_UP;
 	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
@@ -638,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
 	memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
 	       sizeof (struct serv_parm));
+	if (phba->cfg_soft_wwpn)
+		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
 	memcpy((uint8_t *) & phba->fc_nodename,
 	       (uint8_t *) & phba->fc_sparam.nodeName,
 	       sizeof (struct lpfc_name));
@@ -1084,7 +1120,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
 		fc_remote_port_rolechg(rport, rport_ids.roles);
 
 	if ((rport->scsi_target_id != -1) &&
-	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
+	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
 		ndlp->nlp_sid = rport->scsi_target_id;
 	}
 
@@ -1098,8 +1134,11 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
 	struct fc_rport *rport = ndlp->rport;
 	struct lpfc_rport_data *rdata = rport->dd_data;
 
-	ndlp->rport = NULL;
-	rdata->pnode = NULL;
+	if (rport->scsi_target_id == -1) {
+		ndlp->rport = NULL;
+		rdata->pnode = NULL;
+	}
+
 	fc_remote_port_delete(rport);
 
 	return;
@@ -1227,17 +1266,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
 		phba->fc_unmap_cnt++;
 		phba->nport_event_cnt++;
-		/* stop nodev tmo if running */
-		if (nlp->nlp_flag & NLP_NODEV_TMO) {
-			nlp->nlp_flag &= ~NLP_NODEV_TMO;
-			spin_unlock_irq(phba->host->host_lock);
-			del_timer_sync(&nlp->nlp_tmofunc);
-			spin_lock_irq(phba->host->host_lock);
-			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
-				list_del_init(&nlp->nodev_timeout_evt.
-					      evt_listp);
-
-		}
 		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		nlp->nlp_type |= NLP_FC_NODE;
 		break;
@@ -1248,17 +1276,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
 		phba->fc_map_cnt++;
 		phba->nport_event_cnt++;
-		/* stop nodev tmo if running */
-		if (nlp->nlp_flag & NLP_NODEV_TMO) {
-			nlp->nlp_flag &= ~NLP_NODEV_TMO;
-			spin_unlock_irq(phba->host->host_lock);
-			del_timer_sync(&nlp->nlp_tmofunc);
-			spin_lock_irq(phba->host->host_lock);
-			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
-				list_del_init(&nlp->nodev_timeout_evt.
-					      evt_listp);
-
-		}
 		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		break;
 	case NLP_NPR_LIST:
@@ -1267,11 +1284,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
 		phba->fc_npr_cnt++;
 
-		if (!(nlp->nlp_flag & NLP_NODEV_TMO))
-			mod_timer(&nlp->nlp_tmofunc,
-				  jiffies + HZ * phba->cfg_nodev_tmo);
-
-		nlp->nlp_flag |= NLP_NODEV_TMO;
 		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
 		break;
 	case NLP_JUST_DQ:
@@ -1301,7 +1313,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 			 * already. If we have, and it's a scsi entity, be
 			 * sure to unblock any attached scsi devices
 			 */
-			if (!nlp->rport)
+			if ((!nlp->rport) || (nlp->rport->port_state ==
+					      FC_PORTSTATE_BLOCKED))
 				lpfc_register_remote_port(phba, nlp);
 
 			/*
@@ -1313,7 +1326,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 			if ((rport_add == mapped) &&
 			    ((!nlp->rport) ||
 			     (nlp->rport->scsi_target_id == -1) ||
-			     (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
+			     (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
 				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
 				spin_lock_irq(phba->host->host_lock);
 				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
@@ -1557,6 +1570,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		}
 	}
+
+	spin_lock_irq(phba->host->host_lock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1569,18 +1584,16 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 			mempool_free(mb, phba->mbox_mem_pool);
 		}
 	}
+	spin_unlock_irq(phba->host->host_lock);
 
 	lpfc_els_abort(phba,ndlp,0);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
+	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
 	spin_unlock_irq(phba->host->host_lock);
-	del_timer_sync(&ndlp->nlp_tmofunc);
 
 	ndlp->nlp_last_elscmd = 0;
 	del_timer_sync(&ndlp->nlp_delayfunc);
 
-	if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
-		list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
 	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
 		list_del_init(&ndlp->els_retry_evt.evt_listp);
 
@@ -1597,16 +1610,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 int
 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 {
-	if (ndlp->nlp_flag & NLP_NODEV_TMO) {
-		spin_lock_irq(phba->host->host_lock);
-		ndlp->nlp_flag &= ~NLP_NODEV_TMO;
-		spin_unlock_irq(phba->host->host_lock);
-		del_timer_sync(&ndlp->nlp_tmofunc);
-		if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
-			list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
-
-	}
-
 
 	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
 		lpfc_cancel_retry_delay_tmo(phba, ndlp);
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
 			/* LOG change to REGLOGIN */
 			/* FIND node DID reglogin */
 			lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-					"%d:0931 FIND node DID reglogin"
+					"%d:0901 FIND node DID reglogin"
 					" Data: x%p x%x x%x x%x\n",
 					phba->brd_no,
 					ndlp, ndlp->nlp_DID,
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
 			/* LOG change to PRLI */
 			/* FIND node DID prli */
 			lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-					"%d:0931 FIND node DID prli "
+					"%d:0902 FIND node DID prli "
 					"Data: x%p x%x x%x x%x\n",
 					phba->brd_no,
 					ndlp, ndlp->nlp_DID,
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
 			/* LOG change to NPR */
 			/* FIND node DID npr */
 			lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-					"%d:0931 FIND node DID npr "
+					"%d:0903 FIND node DID npr "
 					"Data: x%p x%x x%x x%x\n",
 					phba->brd_no,
 					ndlp, ndlp->nlp_DID,
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
 			/* LOG change to UNUSED */
 			/* FIND node DID unused */
 			lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-					"%d:0931 FIND node DID unused "
+					"%d:0905 FIND node DID unused "
 					"Data: x%p x%x x%x x%x\n",
 					phba->brd_no,
 					ndlp, ndlp->nlp_DID,
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
 		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (!initlinkmbox) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-					"%d:0226 Device Discovery "
+					"%d:0206 Device Discovery "
 					"completion error\n",
 					phba->brd_no);
 			phba->hba_state = LPFC_HBA_ERROR;
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
 		if (!clearlambox) {
 			clrlaerr = 1;
 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-					"%d:0226 Device Discovery "
+					"%d:0207 Device Discovery "
 					"completion error\n",
 					phba->brd_no);
 			phba->hba_state = LPFC_HBA_ERROR;
@@ -2421,34 +2424,6 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
 	return;
 }
 
-static void
-lpfc_nodev_timeout(unsigned long ptr)
-{
-	struct lpfc_hba *phba;
-	struct lpfc_nodelist *ndlp;
-	unsigned long iflag;
-	struct lpfc_work_evt *evtp;
-
-	ndlp = (struct lpfc_nodelist *)ptr;
-	phba = ndlp->nlp_phba;
-	evtp = &ndlp->nodev_timeout_evt;
-	spin_lock_irqsave(phba->host->host_lock, iflag);
-
-	if (!list_empty(&evtp->evt_listp)) {
-		spin_unlock_irqrestore(phba->host->host_lock, iflag);
-		return;
-	}
-	evtp->evt_arg1 = ndlp;
-	evtp->evt = LPFC_EVT_NODEV_TMO;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
-	return;
-}
-
-
 /*
  * This routine handles processing a NameServer REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -2572,11 +2547,7 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	      uint32_t did)
 {
 	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
-	INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-	init_timer(&ndlp->nlp_tmofunc);
-	ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
-	ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
 	init_timer(&ndlp->nlp_delayfunc);
 	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
 	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 908d0f27706f..4cdf3464267f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
 	uint16_t offset = 0;
 	static char licensed[56] =
 		    "key unlock for use with gnu public licensed code only\0";
+	static int init_key = 1;
 
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
@@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
 	phba->hba_state = LPFC_INIT_MBX_CMDS;
 
 	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
-		uint32_t *ptext = (uint32_t *) licensed;
+		if (init_key) {
+			uint32_t *ptext = (uint32_t *) licensed;
 
-		for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
-			*ptext = cpu_to_be32(*ptext);
+			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+				*ptext = cpu_to_be32(*ptext);
+			init_key = 0;
+		}
 
 		lpfc_read_nv(phba, pmb);
 		memset((char*)mb->un.varRDnvp.rsvd3, 0,
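
The license string must be byte-swapped exactly once per module lifetime; re-running the loop when a second HBA is probed would swap it back. The function-local `static int init_key` guards the one-time conversion (safe here because probes are serialized). The idiom in a self-contained form:

	#include <stdio.h>

	/* One-shot conversion behind a static flag, as in the hunk above.
	 * __builtin_bswap32 stands in for cpu_to_be32 on a little-endian box. */
	static void convert_once(unsigned int *buf, int words)
	{
		static int init_key = 1;
		int i;

		if (init_key) {
			for (i = 0; i < words; i++)
				buf[i] = __builtin_bswap32(buf[i]);
			init_key = 0;
		}
	}

	int main(void)
	{
		unsigned int data[2] = { 0x11223344, 0x55667788 };

		convert_once(data, 2);
		convert_once(data, 2);	/* no-op: data is not swapped back */
		printf("%08x %08x\n", data[0], data[1]);
		return 0;
	}
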
@@ -264,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
264 kfree(mp); 268 kfree(mp);
265 pmb->context1 = NULL; 269 pmb->context1 = NULL;
266 270
271 if (phba->cfg_soft_wwpn)
272 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
267 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 273 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
268 sizeof (struct lpfc_name)); 274 sizeof (struct lpfc_name));
269 memcpy(&phba->fc_portname, &phba->fc_sparam.portName, 275 memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
@@ -405,19 +411,26 @@ lpfc_config_port_post(struct lpfc_hba * phba)
405 } 411 }
406 /* MBOX buffer will be freed in mbox compl */ 412 /* MBOX buffer will be freed in mbox compl */
407 413
408 i = 0; 414 return (0);
415}
416
417static int
418lpfc_discovery_wait(struct lpfc_hba *phba)
419{
420 int i = 0;
421
409 while ((phba->hba_state != LPFC_HBA_READY) || 422 while ((phba->hba_state != LPFC_HBA_READY) ||
410 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 423 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
411 ((phba->fc_map_cnt == 0) && (i<2)) || 424 ((phba->fc_map_cnt == 0) && (i<2)) ||
412 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 425 (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
413 /* Check every second for 30 retries. */ 426 /* Check every second for 30 retries. */
414 i++; 427 i++;
415 if (i > 30) { 428 if (i > 30) {
416 break; 429 return -ETIMEDOUT;
417 } 430 }
418 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 431 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
419 /* The link is down. Set linkdown timeout */ 432 /* The link is down. Set linkdown timeout */
420 break; 433 return -ETIMEDOUT;
421 } 434 }
422 435
423 /* Delay for 1 second to give discovery time to complete. */ 436 /* Delay for 1 second to give discovery time to complete. */
@@ -425,12 +438,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
 
 	}
 
-	/* Since num_disc_nodes keys off of PLOGI, delay a bit to let
-	 * any potential PRLIs to flush thru the SLI sub-system.
-	 */
-	msleep(50);
-
-	return (0);
+	return 0;
 }
 
 /************************************************************************/
@@ -505,6 +513,7 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
 {
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
+	uint32_t event_data;
 
 	if (phba->work_hs & HS_FFER6) {
 		/* Re-establishing Link */
@@ -549,6 +558,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
 			phba->brd_no, phba->work_hs,
 			phba->work_status[0], phba->work_status[1]);
 
+	event_data = FC_REG_DUMP_EVENT;
+	fc_host_post_vendor_event(phba->host, fc_get_event_number(),
+			sizeof(event_data), (char *) &event_data,
+			SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
 	lpfc_offline(phba);
 	phba->hba_state = LPFC_HBA_ERROR;
@@ -939,12 +953,12 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 				"10-port ", "PCIe"};
 			break;
 		default:
-			m = (typeof(m)){ 0 };
+			m = (typeof(m)){ NULL };
 			break;
 		}
 		break;
 	default:
-		m = (typeof(m)){ 0 };
+		m = (typeof(m)){ NULL };
 		break;
 	}
 
@@ -1339,7 +1353,8 @@ lpfc_offline(struct lpfc_hba * phba)
 	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	unsigned long iflag;
-	int i = 0;
+	int i;
+	int cnt = 0;
 
 	if (!phba)
 		return 0;
@@ -1348,20 +1363,31 @@ lpfc_offline(struct lpfc_hba * phba)
 		return 0;
 
 	psli = &phba->sli;
-	pring = &psli->ring[psli->fcp_ring];
 
 	lpfc_linkdown(phba);
+	lpfc_sli_flush_mbox_queue(phba);
 
-	/* The linkdown event takes 30 seconds to timeout. */
-	while (pring->txcmplq_cnt) {
-		mdelay(10);
-		if (i++ > 3000)
-			break;
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		/* The linkdown event takes 30 seconds to timeout. */
+		while (pring->txcmplq_cnt) {
+			mdelay(10);
+			if (cnt++ > 3000) {
+				lpfc_printf_log(phba,
+					KERN_WARNING, LOG_INIT,
+					"%d:0466 Outstanding IO when "
+					"bringing Adapter offline\n",
+					phba->brd_no);
+				break;
+			}
+		}
 	}
 
+
 	/* stop all timers associated with this hba */
 	lpfc_stop_timer(phba);
 	phba->work_hba_events = 0;
+	phba->work_ha = 0;
 
 	lpfc_printf_log(phba,
 		KERN_WARNING,
@@ -1451,7 +1477,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_put_host;
 
 	host->unique_id = phba->brd_no;
-	init_MUTEX(&phba->hba_can_block);
 	INIT_LIST_HEAD(&phba->ctrspbuflist);
 	INIT_LIST_HEAD(&phba->rnidrspbuflist);
 	INIT_LIST_HEAD(&phba->freebufList);
@@ -1600,7 +1625,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_free_iocbq;
 	}
 
-	/* We can rely on a queue depth attribute only after SLI HBA setup */
+	/*
+	 * Set initial can_queue value since 0 is no longer supported and
+	 * scsi_add_host will fail. This will be adjusted later based on the
+	 * max xri value determined in hba setup.
+	 */
 	host->can_queue = phba->cfg_hba_queue_depth - 10;
 
 	/* Tell the midlayer we support 16 byte commands */
@@ -1620,7 +1649,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (error)
 		goto out_remove_host;
 
-	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
+	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
 			LPFC_DRIVER_NAME, phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1640,6 +1669,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_free_irq;
 	}
 
+	/*
+	 * hba setup may have changed the hba_queue_depth so we need to adjust
+	 * the value of can_queue.
+	 */
+	host->can_queue = phba->cfg_hba_queue_depth - 10;
+
+	lpfc_discovery_wait(phba);
+
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		spin_lock_irq(phba->host->host_lock);
 		lpfc_poll_start_timer(phba);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e42f22aaf71b..4d016c2a1b26 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba)
 
 	return mbq;
 }
+
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
+{
+	switch (cmd) {
+	case MBX_WRITE_NV:	/* 0x03 */
+	case MBX_UPDATE_CFG:	/* 0x1B */
+	case MBX_DOWN_LOAD:	/* 0x1C */
+	case MBX_DEL_LD_ENTRY:	/* 0x1D */
+	case MBX_LOAD_AREA:	/* 0x81 */
+	case MBX_FLASH_WR_ULA:	/* 0x98 */
+	case MBX_LOAD_EXP_ROM:	/* 0x9C */
+		return LPFC_MBOX_TMO_FLASH_CMD;
+	}
+	return LPFC_MBOX_TMO;
+}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 07017658ac56..066292d3995a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba)
 
 	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
 	pci_pool_destroy(phba->lpfc_mbuf_pool);
+
+	/* Free the iocb lookup array */
+	kfree(psli->iocbq_lookup);
+	psli->iocbq_lookup = NULL;
+
 }
 
 void *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27d60ad897cd..d5f415007db2 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 
 	/* Abort outstanding I/O on NPort <nlp_DID> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-			"%d:0201 Abort outstanding I/O on NPort x%x "
+			"%d:0205 Abort outstanding I/O on NPort x%x "
 			"Data: x%x x%x x%x\n",
 			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
 			ndlp->nlp_state, ndlp->nlp_rpi);
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
 		mbox->context2 = ndlp;
 		ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
 
+		/*
+		 * If there is an outstanding PLOGI issued, abort it before
+		 * sending ACC rsp for received PLOGI. If pending plogi
+		 * is not canceled here, the plogi will be rejected by
+		 * remote port and will be retried. On a configuration with
+		 * single discovery thread, this will cause a huge delay in
+		 * discovery. Also this will cause multiple state machines
+		 * running in parallel for this node.
+		 */
+		if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+			/* software abort outstanding PLOGI */
+			lpfc_els_abort(phba, ndlp, 1);
+		}
+
 		lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
 		return 1;
 
@@ -1110,6 +1124,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
 				phba->brd_no,
 				did, mb->mbxStatus, phba->hba_state);
 
+		/*
+		 * If RegLogin failed due to lack of HBA resources do not
+		 * retry discovery.
+		 */
+		if (mb->mbxStatus == MBXERR_RPI_FULL) {
+			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+			ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+			lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+			return ndlp->nlp_state;
+		}
+
 		/* Put ndlp in npr list set plogi timer for 1 sec */
 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
 		spin_lock_irq(phba->host->host_lock);
@@ -1590,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
 
 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
 
-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+	/*
+	 * Do not start discovery if discovery is about to start
+	 * or discovery in progress for this node. Starting discovery
+	 * here will affect the counting of discovery threads.
+	 */
+	if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
+		(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
 			ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
@@ -1782,7 +1813,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
  */
 /*
  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
- * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
  * expire, all effected nodes will receive a DEVICE_RM event.
  */
 /*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7dc4c2e6bed2..97ae98dc95d0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
 
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -41,20 +42,6 @@
 #define LPFC_ABORT_WAIT 2
 
 
-static inline void
-lpfc_block_requests(struct lpfc_hba * phba)
-{
-	down(&phba->hba_can_block);
-	scsi_block_requests(phba->host);
-}
-
-static inline void
-lpfc_unblock_requests(struct lpfc_hba * phba)
-{
-	scsi_unblock_requests(phba->host);
-	up(&phba->hba_can_block);
-}
-
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O. The non-DMAable buffer region
@@ -167,22 +154,6 @@ static void
 lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
 	unsigned long iflag = 0;
-	/*
-	 * There are only two special cases to consider. (1) the scsi command
-	 * requested scatter-gather usage or (2) the scsi command allocated
-	 * a request buffer, but did not request use_sg. There is a third
-	 * case, but it does not require resource deallocation.
-	 */
-	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
-		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
-				psb->seg_cnt, psb->pCmd->sc_data_direction);
-	} else {
-		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
-			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
-				psb->pCmd->request_bufflen,
-				psb->pCmd->sc_data_direction);
-		}
-	}
 
 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 	psb->pCmd = NULL;
@@ -296,6 +267,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 }
 
 static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+{
+	/*
+	 * There are only two special cases to consider. (1) the scsi command
+	 * requested scatter-gather usage or (2) the scsi command allocated
+	 * a request buffer, but did not request use_sg. There is a third
+	 * case, but it does not require resource deallocation.
+	 */
+	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
+		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
+				psb->seg_cnt, psb->pCmd->sc_data_direction);
+	} else {
+		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
+			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
+				psb->pCmd->request_bufflen,
+				psb->pCmd->sc_data_direction);
+		}
+	}
+}
+
+static void
 lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
@@ -468,6 +460,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 		lpfc_release_scsi_buf(phba, lpfc_cmd);
 		return;
 	}
@@ -525,6 +518,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		}
 	}
 
+	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
 }
 
@@ -623,6 +617,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
 static int
 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 			     struct lpfc_scsi_buf *lpfc_cmd,
+			     unsigned int lun,
 			     uint8_t task_mgmt_cmd)
 {
 	struct lpfc_sli *psli;
@@ -641,8 +636,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 	piocb = &piocbq->iocb;
 
 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
-			&lpfc_cmd->fcp_cmnd->fcp_lun);
+	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
 
 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
@@ -669,14 +663,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 
 static int
 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
-		    unsigned tgt_id, struct lpfc_rport_data *rdata)
+		    unsigned tgt_id, unsigned int lun,
+		    struct lpfc_rport_data *rdata)
 {
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
 	lpfc_cmd->rdata = rdata;
-	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
+	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
+					   FCP_TARGET_RESET);
 	if (!ret)
 		return FAILED;
 
@@ -836,6 +832,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 
  out_host_busy_free_buf:
+	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -845,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 }
 
+static void
+lpfc_block_error_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+	spin_lock_irq(shost->host_lock);
+	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+		spin_unlock_irq(shost->host_lock);
+		msleep(1000);
+		spin_lock_irq(shost->host_lock);
+	}
+	spin_unlock_irq(shost->host_lock);
+	return;
+}
 
 static int
 lpfc_abort_handler(struct scsi_cmnd *cmnd)
@@ -859,7 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	unsigned int loop_count = 0;
 	int ret = SUCCESS;
 
-	lpfc_block_requests(phba);
+	lpfc_block_error_handler(cmnd);
 	spin_lock_irq(shost->host_lock);
 
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -923,7 +935,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
 		if (++loop_count
-		    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
+		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
 			break;
 	}
 
@@ -945,7 +957,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 			cmnd->device->lun, cmnd->serial_number);
 
 	spin_unlock_irq(shost->host_lock);
-	lpfc_unblock_requests(phba);
 
 	return ret;
 }
@@ -963,11 +974,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	int ret = FAILED;
 	int cnt, loopcnt;
 
-	lpfc_block_requests(phba);
+	lpfc_block_error_handler(cmnd);
 	spin_lock_irq(shost->host_lock);
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
-	 * target is rediscovered or nodev timeout expires.
+	 * target is rediscovered or devloss timeout expires.
 	 */
 	while ( 1 ) {
 		if (!pnode)
@@ -986,12 +997,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	if (lpfc_cmd == NULL)
 		goto out;
 
-	lpfc_cmd->pCmd = cmnd;
 	lpfc_cmd->timeout = 60;
 	lpfc_cmd->scsi_hba = phba;
 	lpfc_cmd->rdata = rdata;
 
-	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
+	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
+					   FCP_LUN_RESET);
 	if (!ret)
 		goto out_free_scsi_buf;
 
@@ -1018,7 +1029,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	cmd_status = iocbqrsp->iocb.ulpStatus;
 
 	lpfc_sli_release_iocbq(phba, iocbqrsp);
-	lpfc_release_scsi_buf(phba, lpfc_cmd);
 
 	/*
 	 * All outstanding txcmplq I/Os should have been aborted by the device.
@@ -1040,7 +1050,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 		spin_lock_irq(phba->host->host_lock);
 
 		if (++loopcnt
-		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
 			break;
 
 		cnt = lpfc_sli_sum_iocb(phba,
@@ -1057,6 +1067,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	}
 
 out_free_scsi_buf:
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
 			"Data: x%x x%x x%x\n",
@@ -1065,7 +1077,6 @@ out_free_scsi_buf:
 
 out:
 	spin_unlock_irq(shost->host_lock);
-	lpfc_unblock_requests(phba);
 	return ret;
 }
 
@@ -1080,7 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	int cnt, loopcnt;
 	struct lpfc_scsi_buf * lpfc_cmd;
 
-	lpfc_block_requests(phba);
+	lpfc_block_error_handler(cmnd);
 	spin_lock_irq(shost->host_lock);
 
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1089,7 +1100,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 
 	/* The lpfc_cmd storage is reused. Set all loop invariants. */
 	lpfc_cmd->timeout = 60;
-	lpfc_cmd->pCmd = cmnd;
 	lpfc_cmd->scsi_hba = phba;
 
 	/*
@@ -1097,7 +1107,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	 * targets known to the driver. Should any target reset
 	 * fail, this routine returns failure to the midlayer.
 	 */
-	for (i = 0; i < MAX_FCP_TARGET; i++) {
+	for (i = 0; i < LPFC_MAX_TARGET; i++) {
 		/* Search the mapped list for this target ID */
 		match = 0;
 		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
@@ -1109,11 +1119,11 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 		if (!match)
 			continue;
 
-		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
-					  i, ndlp->rport->dd_data);
+		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
+					  ndlp->rport->dd_data);
 		if (ret != SUCCESS) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"%d:0713 Bus Reset on target %d failed\n",
+				"%d:0700 Bus Reset on target %d failed\n",
 				phba->brd_no, i);
 			err_count++;
 		}
@@ -1141,7 +1151,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 		spin_lock_irq(phba->host->host_lock);
 
 		if (++loopcnt
-		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
 			break;
 
 		cnt = lpfc_sli_sum_iocb(phba,
@@ -1163,7 +1173,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 			phba->brd_no, ret);
 out:
 	spin_unlock_irq(shost->host_lock);
-	lpfc_unblock_requests(phba);
 	return ret;
 }
 
@@ -1240,7 +1249,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	 * target pointer is stored in the starget_data for the
 	 * driver's sysfs entry point functions.
 	 */
-	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
+	rport->dev_loss_tmo = phba->cfg_devloss_tmo;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_poll_fcp_ring(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bb69a7a1ec59..70f4d5a1348e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -191,35 +191,12 @@ static int
 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
 			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
 {
-	uint16_t iotag;
-
 	list_add_tail(&piocb->list, &pring->txcmplq);
 	pring->txcmplq_cnt++;
 	if (unlikely(pring->ringno == LPFC_ELS_RING))
 		mod_timer(&phba->els_tmofunc,
 			jiffies + HZ * (phba->fc_ratov << 1));
 
-	if (pring->fast_lookup) {
-		/* Setup fast lookup based on iotag for completion */
-		iotag = piocb->iocb.ulpIoTag;
-		if (iotag && (iotag < pring->fast_iotag))
-			*(pring->fast_lookup + iotag) = piocb;
-		else {
-
-			/* Cmd ring <ringno> put: iotag <iotag> greater then
-			   configured max <fast_iotag> wd0 <icmd> */
-			lpfc_printf_log(phba,
-					KERN_ERR,
-					LOG_SLI,
-					"%d:0316 Cmd ring %d put: iotag x%x "
-					"greater then configured max x%x "
-					"wd0 x%x\n",
-					phba->brd_no,
-					pring->ringno, iotag,
-					pring->fast_iotag,
-					*(((uint32_t *)(&piocb->iocb)) + 7));
-		}
-	}
 	return (0);
 }
 
@@ -343,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
 			kfree(old_arr);
 			return iotag;
 		}
-	}
+	} else
+		spin_unlock_irq(phba->host->host_lock);
 
 	lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
 			"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -601,7 +579,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
 		/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
 		   <status> */
 		lpfc_printf_log(phba,
-				KERN_ERR,
+				KERN_WARNING,
 				LOG_MBOX | LOG_SLI,
 				"%d:0304 Stray Mailbox Interrupt "
 				"mbxCommand x%x mbxStatus x%x\n",
@@ -992,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
 			 * resources need to be recovered.
 			 */
 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-				printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
-				       " Skipping completion\n", __FUNCTION__,
-				       irsp->ulpCommand);
+				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+						"%d:0314 IOCB cmd 0x%x"
+						" processed. Skipping"
+						" completion", phba->brd_no,
+						irsp->ulpCommand);
 				break;
 			}
 
@@ -1127,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
 		if (unlikely(irsp->ulpStatus)) {
 			/* Rsp ring <ringno> error: IOCB */
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-				"%d:0326 Rsp Ring %d error: IOCB Data: "
+				"%d:0336 Rsp Ring %d error: IOCB Data: "
 				"x%x x%x x%x x%x x%x x%x x%x x%x\n",
 				phba->brd_no, pring->ringno,
 				irsp->un.ulpWord[0], irsp->un.ulpWord[1],
@@ -1145,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
 			 * resources need to be recovered.
 			 */
 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-				printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
-				       "Skipping completion\n", __FUNCTION__,
-				       irsp->ulpCommand);
+				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+						"%d:0333 IOCB cmd 0x%x"
+						" processed. Skipping"
+						" completion\n", phba->brd_no,
+						irsp->ulpCommand);
 				break;
 			}
 
@@ -1178,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
 		} else {
 			/* Unknown IOCB command */
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"%d:0321 Unknown IOCB command "
+				"%d:0334 Unknown IOCB command "
 				"Data: x%x, x%x x%x x%x x%x\n",
 				phba->brd_no, type, irsp->ulpCommand,
 				irsp->ulpStatus, irsp->ulpIoTag,
@@ -1261,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
 		lpfc_printf_log(phba,
 				KERN_ERR,
 				LOG_SLI,
-				"%d:0312 Ring %d handler: portRspPut %d "
+				"%d:0303 Ring %d handler: portRspPut %d "
 				"is bigger then rsp ring %d\n",
 				phba->brd_no,
 				pring->ringno, portRspPut, portRspMax);
@@ -1406,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
 			lpfc_printf_log(phba,
 					KERN_ERR,
 					LOG_SLI,
-					"%d:0321 Unknown IOCB command "
+					"%d:0335 Unknown IOCB command "
 					"Data: x%x x%x x%x x%x\n",
 					phba->brd_no,
 					irsp->ulpCommand,
@@ -1422,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
 							 next_iocb,
 							 &saveq->list,
 							 list) {
+					list_del(&rspiocbp->list);
 					lpfc_sli_release_iocbq(phba,
 								 rspiocbp);
 				}
 			}
-
 			lpfc_sli_release_iocbq(phba, saveq);
 		}
 	}
@@ -1570,8 +1552,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
 
 void lpfc_reset_barrier(struct lpfc_hba * phba)
 {
-	uint32_t * resp_buf;
-	uint32_t * mbox_buf;
+	uint32_t __iomem *resp_buf;
+	uint32_t __iomem *mbox_buf;
 	volatile uint32_t mbox;
 	uint32_t hc_copy;
 	int i;
@@ -1587,7 +1569,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
 	 * Tell the other part of the chip to suspend temporarily all
 	 * its DMA activity.
 	 */
-	resp_buf = (uint32_t *)phba->MBslimaddr;
+	resp_buf = phba->MBslimaddr;
 
 	/* Disable the error attention */
 	hc_copy = readl(phba->HCregaddr);
@@ -1605,7 +1587,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
 
 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
-	mbox_buf = (uint32_t *)phba->MBslimaddr;
+	mbox_buf = phba->MBslimaddr;
 	writel(mbox, mbox_buf);
 
 	for (i = 0;
@@ -1734,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
 	phba->fc_myDID = 0;
 	phba->fc_prevDID = 0;
 
-	psli->sli_flag = 0;
-
 	/* Turn off parity checking and serr during the physical reset */
 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
 			      (cfg_value &
 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
 	/* Now toggle INITFF bit in the Host Control Register */
 	writel(HC_INITFF, phba->HCregaddr);
 	mdelay(1);
@@ -1783,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
 
 	/* Restart HBA */
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no,
+			"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
 			phba->hba_state, psli->sli_flag);
 
 	word0 = 0;
@@ -1805,7 +1785,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
 		skip_post = 0;
 		word0 = 0;	/* This is really setting up word1 */
 	}
-	to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
+	to_slim = phba->MBslimaddr + sizeof (uint32_t);
 	writel(*(uint32_t *) mb, to_slim);
 	readl(to_slim); /* flush */
 
@@ -1815,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
 
 	spin_unlock_irq(phba->host->host_lock);
 
+	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+	psli->stats_start = get_seconds();
+
 	if (skip_post)
 		mdelay(100);
 	else
@@ -1925,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
 	}
 
 	while (resetcount < 2 && !done) {
+		spin_lock_irq(phba->host->host_lock);
+		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irq(phba->host->host_lock);
 		phba->hba_state = LPFC_STATE_UNKNOWN;
 		lpfc_sli_brdrestart(phba);
 		msleep(2500);
@@ -1932,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
 		if (rc)
 			break;
 
+		spin_lock_irq(phba->host->host_lock);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irq(phba->host->host_lock);
 		resetcount++;
 
 	/* Call pre CONFIG_PORT mailbox command initialization. A value of 0
@@ -2217,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
 			return (MBX_NOT_FINISHED);
 		}
 		/* timeout active mbox command */
-		mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+		mod_timer(&psli->mbox_tmo, (jiffies +
+			       (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
 	}
 
 	/* Mailbox cmd <cmd> issue */
@@ -2277,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
 		break;
 
 	case MBX_POLL:
-		i = 0;
 		psli->mbox_active = NULL;
 		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
 			/* First read mbox status word */
@@ -2291,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
 		/* Read the HBA Host Attention Register */
 		ha_copy = readl(phba->HAregaddr);
 
+		i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
+		i *= 1000; /* Convert to ms */
+
 		/* Wait for command to complete */
 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
 		       (!(ha_copy & HA_MBATT) &&
 			(phba->hba_state > LPFC_WARM_START))) {
-			if (i++ >= 100) {
+			if (i-- <= 0) {
 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 				spin_unlock_irqrestore(phba->host->host_lock,
 						       drvr_flag);
@@ -2313,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
 
 			/* Can be in interrupt context, do not sleep */
 			/* (or might be called with interrupts disabled) */
-			mdelay(i);
+			mdelay(1);
 
 			spin_lock_irqsave(phba->host->host_lock, drvr_flag);
 
@@ -2659,8 +2651,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
 
 		INIT_LIST_HEAD(&(pring->txq));
 
-		kfree(pring->fast_lookup);
-		pring->fast_lookup = NULL;
 	}
 
 	spin_unlock_irqrestore(phba->host->host_lock, flags);
@@ -3030,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
 
 	if (timeleft == 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"%d:0329 IOCB wait timeout error - no "
+				"%d:0338 IOCB wait timeout error - no "
 				"wake response Data x%x\n",
 				phba->brd_no, timeout);
 		retval = IOCB_TIMEDOUT;
@@ -3110,6 +3100,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
 	return retval;
 }
 
+int
+lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
+{
+	int i = 0;
+
+	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
+		if (i++ > LPFC_MBOX_TMO * 1000)
+			return 1;
+
+		if (lpfc_sli_handle_mb_event(phba) == 0)
+			i = 0;
+
+		msleep(1);
+	}
+
+	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
+}
+
 irqreturn_t
 lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
 {
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a52d6c6cf083..e26de6809358 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -135,8 +135,6 @@ struct lpfc_sli_ring {
 	uint32_t fast_iotag;	/* max fastlookup based iotag */
 	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
 	uint32_t iotag_max;	/* max iotag value to use */
-	struct lpfc_iocbq ** fast_lookup;	/* array of IOCB ptrs indexed by
-						   iotag */
 	struct list_head txq;
 	uint16_t txq_cnt;	/* current length of queue */
 	uint16_t txq_max;	/* max length */
@@ -174,6 +172,18 @@ struct lpfc_sli_stat {
 	uint32_t mbox_busy;	/* Mailbox cmd busy */
 };
 
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+	uint32_t link_failure_count;
+	uint32_t loss_of_sync_count;
+	uint32_t loss_of_signal_count;
+	uint32_t prim_seq_protocol_err_count;
+	uint32_t invalid_tx_word_count;
+	uint32_t invalid_crc_count;
+	uint32_t error_frames;
+	uint32_t link_events;
+};
+
 /* Structure used to hold SLI information */
 struct lpfc_sli {
 	uint32_t num_rings;
@@ -203,6 +213,8 @@ struct lpfc_sli {
 	struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
 	size_t iocbq_lookup_len;           /* current lengs of the array */
 	uint16_t last_iotag;               /* last allocated IOTAG */
+	unsigned long stats_start;         /* in seconds */
+	struct lpfc_lnk_stat lnk_stat_offsets;
 };
 
208/* Given a pointer to the start of the ring, and the slot number of 220/* Given a pointer to the start of the ring, and the slot number of
@@ -213,3 +225,9 @@ struct lpfc_sli {
 
 #define LPFC_MBOX_TMO		30	/* Sec tmo for outstanding mbox
 					   command */
+#define LPFC_MBOX_TMO_FLASH_CMD	300	/* Sec tmo for outstanding FLASH write
+					 * or erase cmds. This is especially
+					 * long because of the potential of
+					 * multiple flash erases that can be
+					 * spawned.
+					 */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b737568b831..ac417908b407 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.1.6"
+#define LPFC_DRIVER_VERSION "8.1.10"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 93edaa8696cf..6422de72bf43 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
 	int nseg;
 
 	total = 0;
-	scl = (struct scatterlist *) cmd->buffer;
+	scl = (struct scatterlist *) cmd->request_buffer;
 	nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
 			cmd->sc_data_direction);
 	for (i = 0; i < nseg; ++i) {
@@ -431,7 +431,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
 	struct fsc_state *state;
 	struct Scsi_Host *host;
 	void *dma_cmd_space;
-	unsigned char *clkprop;
+	const unsigned char *clkprop;
 	int proplen, rc = -ENODEV;
 
 	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index e31fadd61904..118206d68c6c 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -43,9 +43,6 @@
 
 /* #define DEBUG_MAC_ESP */
 
-#define mac_turnon_irq(x)	mac_enable_irq(x)
-#define mac_turnoff_irq(x)	mac_disable_irq(x)
-
 extern void esp_handle(struct NCR_ESP *esp);
 extern void mac_esp_intr(int irq, void *dev_id, struct pt_regs *pregs);
 
@@ -639,13 +636,13 @@ static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length)
 
 static void dma_ints_off(struct NCR_ESP * esp)
 {
-	mac_turnoff_irq(esp->irq);
+	disable_irq(esp->irq);
 }
 
 
 static void dma_ints_on(struct NCR_ESP * esp)
 {
-	mac_turnon_irq(esp->irq);
+	enable_irq(esp->irq);
 }
 
 /*
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 777f9bcd1179..a942a21dd87e 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -65,9 +65,6 @@
 #define RESET_BOOT
 #define DRIVER_SETUP
 
-#define ENABLE_IRQ()	mac_enable_irq( IRQ_MAC_SCSI );
-#define DISABLE_IRQ()	mac_disable_irq( IRQ_MAC_SCSI );
-
 extern void via_scsi_clear(void);
 
 #ifdef RESET_BOOT
@@ -351,7 +348,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
 	printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );
 
 	/* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
-	mac_disable_irq(IRQ_MAC_SCSI);
+	disable_irq(IRQ_MAC_SCSI);
 
 	/* get in phase */
 	NCR5380_write( TARGET_COMMAND_REG,
@@ -369,7 +366,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
 	barrier();
 
 	/* switch on SCSI IRQ again */
-	mac_enable_irq(IRQ_MAC_SCSI);
+	enable_irq(IRQ_MAC_SCSI);
 
 	printk(KERN_INFO " done\n" );
 }
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5d2cefb5e52d..b87bef69ba0f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2822,9 +2822,7 @@ mega_print_inquiry(char *page, char *scsi_inq)
 
 	i = scsi_inq[0] & 0x1f;
 
-	len += sprintf(page+len, " Type: %s ",
-			i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
-			"Unknown ");
+	len += sprintf(page+len, " Type: %s ", scsi_device_type(i));
 
 	len += sprintf(page+len,
 		" ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
@@ -3658,8 +3656,9 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
 			 * Send the request sense data also, irrespective of
 			 * whether the user has asked for it or not.
 			 */
-			copy_to_user(upthru->reqsensearea,
-					pthru->reqsensearea, 14);
+			if (copy_to_user(upthru->reqsensearea,
+					pthru->reqsensearea, 14))
+				rval = -EFAULT;
 
 freemem_and_return:
 			if( pthru->dataxferlen ) {
@@ -4714,7 +4713,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
 				megaraid_isr_memmapped : megaraid_isr_iomapped,
-					SA_SHIRQ, "megaraid", adapter)) {
+					IRQF_SHARED, "megaraid", adapter)) {
 		printk(KERN_WARNING
 			"megaraid: Couldn't register IRQ %d!\n", irq);
 		goto out_free_scb_list;
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 4675343228ad..8cd0bd1d0f7c 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -37,6 +37,12 @@
 #define LSI_MAX_CHANNELS		16
 #define LSI_MAX_LOGICAL_DRIVES_64LD	(64+1)
 
+#define HBA_SIGNATURE_64_BIT		0x299
+#define PCI_CONF_AMISIG64		0xa4
+
+#define MEGA_SCSI_INQ_EVPD		1
+#define MEGA_INVALID_FIELD_IN_CDB	0x24
+
 
 /**
  * scb_t - scsi command control block
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index bdaee144a1c3..b8aa34202ec3 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -132,6 +132,10 @@ typedef struct uioc {
 /* Driver Data: */
 	void __user *		user_data;
 	uint32_t		user_data_len;
+
+	/* 64bit alignment */
+	uint32_t		pad_for_64bit_align;
+
 	mraid_passthru_t __user	*user_pthru;
 
 	mraid_passthru_t	*pthru32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index bec1424eda85..266b3910846b 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * FILE		: megaraid_mbox.c
- * Version	: v2.20.4.8 (Apr 11 2006)
+ * Version	: v2.20.4.9 (Jul 16 2006)
  *
  * Authors:
  *	Atul Mukker <Atul.Mukker@lsil.com>
@@ -330,6 +330,21 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
 	NULL,
 };
 
+/**
+ * megaraid_change_queue_depth - Change the device's queue depth
+ * @sdev:	scsi device struct
+ * @qdepth:	depth to set
+ *
+ * Return value:
+ *	actual depth set
+ **/
+static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	if (qdepth > MBOX_MAX_SCSI_CMDS)
+		qdepth = MBOX_MAX_SCSI_CMDS;
+	scsi_adjust_queue_depth(sdev, 0, qdepth);
+	return sdev->queue_depth;
+}
 
 /*
  * Scsi host template for megaraid unified driver
@@ -343,6 +358,7 @@ static struct scsi_host_template megaraid_template_g = {
 	.eh_device_reset_handler	= megaraid_reset_handler,
 	.eh_bus_reset_handler		= megaraid_reset_handler,
 	.eh_host_reset_handler		= megaraid_reset_handler,
+	.change_queue_depth		= megaraid_change_queue_depth,
 	.use_clustering			= ENABLE_CLUSTERING,
 	.sdev_attrs			= megaraid_sdev_attrs,
 	.shost_attrs			= megaraid_shost_attrs,
@@ -714,12 +730,13 @@ megaraid_io_detach(adapter_t *adapter)
  * . Allocate memory required for all the commands
  * . Use internal library of FW routines, build up complete soft state
  */
-static int __init
+static int __devinit
 megaraid_init_mbox(adapter_t *adapter)
 {
 	struct pci_dev		*pdev;
 	mraid_device_t		*raid_dev;
 	int			i;
+	uint32_t		magic64;
 
 
 	adapter->ito	= MBOX_TIMEOUT;
@@ -767,7 +784,7 @@ megaraid_init_mbox(adapter_t *adapter)
 	//
 
 	// request IRQ and register the interrupt service routine
-	if (request_irq(adapter->irq, megaraid_isr, SA_SHIRQ, "megaraid",
+	if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
 		adapter)) {
 
 		con_log(CL_ANN, (KERN_WARNING
@@ -863,12 +880,33 @@ megaraid_init_mbox(adapter_t *adapter)
 
 	// Set the DMA mask to 64-bit. All supported controllers as capable of
 	// DMA in this range
-	if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) {
+	pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
 
-		con_log(CL_ANN, (KERN_WARNING
-			"megaraid: could not set DMA mask for 64-bit.\n"));
+	if (((magic64 == HBA_SIGNATURE_64_BIT) &&
+		((adapter->pdev->subsystem_device !=
+		PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
+		(adapter->pdev->subsystem_device !=
+		PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+		adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+		adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+		adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+		adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+		adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
+		if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
+			con_log(CL_ANN, (KERN_WARNING
+				"megaraid: DMA mask for 64-bit failed\n"));
 
-		goto out_free_sysfs_res;
+			if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
+				con_log(CL_ANN, (KERN_WARNING
+					"megaraid: 32-bit DMA mask failed\n"));
+				goto out_free_sysfs_res;
+			}
+		}
 	}
 
 	// setup tasklet for DPC
@@ -1622,6 +1660,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
 			rdev->last_disp |= (1L << SCP2CHANNEL(scp));
 		}
 
+		if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
+			scp->sense_buffer[0] = 0x70;
+			scp->sense_buffer[2] = ILLEGAL_REQUEST;
+			scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
+			scp->result = CHECK_CONDITION << 1;
+			return NULL;
+		}
+
 		/* Fall through */
 
 	case READ_CAPACITY:
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 868fb0ec93e7..2b5a3285f799 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
 #include "megaraid_ioctl.h"
 
 
-#define MEGARAID_VERSION	"2.20.4.8"
-#define MEGARAID_EXT_VERSION	"(Release Date: Mon Apr 11 12:27:22 EST 2006)"
+#define MEGARAID_VERSION	"2.20.4.9"
+#define MEGARAID_EXT_VERSION	"(Release Date: Sun Jul 16 12:27:22 EST 2006)"
 
 
 /*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e8f534fb336b..d85b9a8f1b8d 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mm.c 12 * FILE : megaraid_mm.c
13 * Version : v2.20.2.6 (Mar 7 2005) 13 * Version : v2.20.2.7 (Jul 16 2006)
14 * 14 *
15 * Common management module 15 * Common management module
16 */ 16 */
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 3d9e67d6849d..c8762b2b8ed1 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -27,9 +27,9 @@
27#include "megaraid_ioctl.h" 27#include "megaraid_ioctl.h"
28 28
29 29
30#define LSI_COMMON_MOD_VERSION "2.20.2.6" 30#define LSI_COMMON_MOD_VERSION "2.20.2.7"
31#define LSI_COMMON_MOD_EXT_VERSION \ 31#define LSI_COMMON_MOD_EXT_VERSION \
32 "(Release Date: Mon Mar 7 00:01:03 EST 2005)" 32 "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
33 33
34 34
35#define LSI_DBGLVL dbglevel 35#define LSI_DBGLVL dbglevel
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 0c9516ff636f..4cab5b534b25 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.04 13 * Version : v00.00.03.01
14 * 14 *
15 * Authors: 15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
@@ -53,25 +53,15 @@ MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
53 */ 53 */
54static struct pci_device_id megasas_pci_table[] = { 54static struct pci_device_id megasas_pci_table[] = {
55 55
56 { 56 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
57 PCI_VENDOR_ID_LSI_LOGIC, 57 /* xscale IOP */
58 PCI_DEVICE_ID_LSI_SAS1064R, // xscale IOP 58 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
59 PCI_ANY_ID, 59 /* ppc IOP */
60 PCI_ANY_ID, 60 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
61 }, 61 /* xscale IOP, vega */
62 { 62 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
63 PCI_VENDOR_ID_LSI_LOGIC, 63 /* xscale IOP */
64 PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP 64 {}
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {
69 PCI_VENDOR_ID_DELL,
70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP
71 PCI_ANY_ID,
72 PCI_ANY_ID,
73 },
74 {0} /* Terminating entry */
75}; 65};
76 66
77MODULE_DEVICE_TABLE(pci, megasas_pci_table); 67MODULE_DEVICE_TABLE(pci, megasas_pci_table);
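
PCI_DEVICE() expands to the vendor/device pair with both subsystem IDs defaulted to PCI_ANY_ID, so each converted entry above says exactly what the old five-line initializer said. A sketch with a hypothetical device ID:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 0x0411)},	/* hypothetical ID */
	{}						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_table);
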
@@ -289,9 +279,14 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
289 * @regs: MFI register set 279 * @regs: MFI register set
290 */ 280 */
291static inline void 281static inline void
292megasas_disable_intr(struct megasas_register_set __iomem * regs) 282megasas_disable_intr(struct megasas_instance *instance)
293{ 283{
294 u32 mask = 0x1f; 284 u32 mask = 0x1f;
285 struct megasas_register_set __iomem *regs = instance->reg_set;
286
287 if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078R)
288 mask = 0xffffffff;
289
295 writel(mask, &regs->outbound_intr_mask); 290 writel(mask, &regs->outbound_intr_mask);
296 291
297 /* Dummy readl to force pci flush */ 292 /* Dummy readl to force pci flush */
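
Taking the instance instead of the bare register set lets the helper widen the mask for the 1078 (0xffffffff rather than 0x1f) while keeping one call site. The idiom it preserves is worth noting: MMIO writes are posted, so the dummy readl() after the writel() forces the mask out to the adapter before the caller assumes interrupts are off. Sketch:

#include <asm/io.h>
#include <linux/types.h>

static void example_mask_intrs(void __iomem *intr_mask_reg, u32 mask)
{
	writel(mask, intr_mask_reg);
	readl(intr_mask_reg);	/* dummy read flushes the posted write */
}
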
@@ -1260,7 +1255,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1260 /* 1255 /*
1261 * Bring it to READY state; assuming max wait 2 secs 1256 * Bring it to READY state; assuming max wait 2 secs
1262 */ 1257 */
1263 megasas_disable_intr(instance->reg_set); 1258 megasas_disable_intr(instance);
1264 writel(MFI_INIT_READY, &instance->reg_set->inbound_doorbell); 1259 writel(MFI_INIT_READY, &instance->reg_set->inbound_doorbell);
1265 1260
1266 max_wait = 10; 1261 max_wait = 10;
@@ -1757,6 +1752,11 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1757 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); 1752 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1758 1753
1759 /* 1754 /*
1755 * disable the intr before firing the init frame to FW
1756 */
1757 megasas_disable_intr(instance);
1758
1759 /*
1760 * Issue the init frame in polled mode 1760 * Issue the init frame in polled mode
1761 */ 1761 */
1762 if (megasas_issue_polled(instance, cmd)) { 1762 if (megasas_issue_polled(instance, cmd)) {
@@ -2191,7 +2191,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2191 /* 2191 /*
2192 * Register IRQ 2192 * Register IRQ
2193 */ 2193 */
2194 if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) { 2194 if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) {
2195 printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); 2195 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
2196 goto fail_irq; 2196 goto fail_irq;
2197 } 2197 }
@@ -2234,7 +2234,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2234 megasas_mgmt_info.max_index--; 2234 megasas_mgmt_info.max_index--;
2235 2235
2236 pci_set_drvdata(pdev, NULL); 2236 pci_set_drvdata(pdev, NULL);
2237 megasas_disable_intr(instance->reg_set); 2237 megasas_disable_intr(instance);
2238 free_irq(instance->pdev->irq, instance); 2238 free_irq(instance->pdev->irq, instance);
2239 2239
2240 megasas_release_mfi(instance); 2240 megasas_release_mfi(instance);
@@ -2364,7 +2364,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
2364 2364
2365 pci_set_drvdata(instance->pdev, NULL); 2365 pci_set_drvdata(instance->pdev, NULL);
2366 2366
2367 megasas_disable_intr(instance->reg_set); 2367 megasas_disable_intr(instance);
2368 2368
2369 free_irq(instance->pdev->irq, instance); 2369 free_irq(instance->pdev->irq, instance);
2370 2370
@@ -2838,7 +2838,7 @@ static int __init megasas_init(void)
2838 /* 2838 /*
2839 * Register ourselves as PCI hotplug module 2839 * Register ourselves as PCI hotplug module
2840 */ 2840 */
2841 rval = pci_module_init(&megasas_pci_driver); 2841 rval = pci_register_driver(&megasas_pci_driver);
2842 2842
2843 if (rval) { 2843 if (rval) {
2844 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n"); 2844 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
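
pci_module_init() was a thin compatibility wrapper on its way out of the tree; drivers now call pci_register_driver() directly, which returns 0 or a negative errno, and pair it with pci_unregister_driver() on module exit. Skeleton, assuming a pci_driver filled in elsewhere:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver;	/* assumed defined elsewhere */

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
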
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 927d6ffef05f..3531a14222a7 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,16 @@
18/** 18/**
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.02.04" 21#define MEGASAS_VERSION "00.00.03.01"
22#define MEGASAS_RELDATE "Feb 03, 2006" 22#define MEGASAS_RELDATE "May 14, 2006"
23#define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006" 23#define MEGASAS_EXT_VERSION "Sun May 14 22:49:52 PDT 2006"
24
25/*
26 * Device IDs
27 */
28#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
29#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
30
24/* 31/*
25 * ===================================== 32 * =====================================
26 * MegaRAID SAS MFI firmware definitions 33 * MegaRAID SAS MFI firmware definitions
@@ -554,7 +561,11 @@ struct megasas_ctrl_info {
554#define MFI_POLL_TIMEOUT_SECS 10 561#define MFI_POLL_TIMEOUT_SECS 10
555 562
556#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 563#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
557#define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060 564
565/*
566 * register set for both 1068 and 1078 controllers
567 * structure extended for 1078 registers
568*/
558 569
559struct megasas_register_set { 570struct megasas_register_set {
560 u32 reserved_0[4]; /*0000h*/ 571 u32 reserved_0[4]; /*0000h*/
@@ -1150,10 +1161,10 @@ struct compat_megasas_iocpacket {
1150 struct compat_iovec sgl[MAX_IOCTL_SGE]; 1161 struct compat_iovec sgl[MAX_IOCTL_SGE];
1151} __attribute__ ((packed)); 1162} __attribute__ ((packed));
1152 1163
1164#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
1153#endif 1165#endif
1154 1166
1155#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket) 1167#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
1156#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
1157#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen) 1168#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
1158 1169
1159struct megasas_mgmt_info { 1170struct megasas_mgmt_info {
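
Moving MEGASAS_IOC_FIRMWARE32 under the compat block is not cosmetic: _IOWR() bakes sizeof() of its argument type into the command number, and struct compat_megasas_iocpacket only exists when CONFIG_COMPAT is set. A toy illustration with hypothetical structs, showing how the same magic and number yield two distinct encoded commands:

#include <linux/ioctl.h>

/* hypothetical 64-bit and 32-bit layouts of one ioctl argument */
struct example_packet        { unsigned long buf_ptr; };
struct compat_example_packet { unsigned int  buf_ptr; };

#define EXAMPLE_IOC_FW   _IOWR('M', 1, struct example_packet)
#ifdef CONFIG_COMPAT
/* different sizeof() means a different encoded command number */
#define EXAMPLE_IOC_FW32 _IOWR('M', 1, struct compat_example_packet)
#endif
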
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index f852421002ef..683fc7ae4b8f 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -18,7 +18,6 @@
18 * - retry arbitration if lost (unless higher levels do this for us) 18 * - retry arbitration if lost (unless higher levels do this for us)
19 * - power down the chip when no device is detected 19 * - power down the chip when no device is detected
20 */ 20 */
21#include <linux/config.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
@@ -1269,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1269 if (cmd->use_sg > 0) { 1268 if (cmd->use_sg > 0) {
1270 int nseg; 1269 int nseg;
1271 total = 0; 1270 total = 0;
1272 scl = (struct scatterlist *) cmd->buffer; 1271 scl = (struct scatterlist *) cmd->request_buffer;
1273 off = ms->data_ptr; 1272 off = ms->data_ptr;
1274 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, 1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
1275 cmd->sc_data_direction); 1274 cmd->sc_data_direction);
@@ -1757,16 +1756,23 @@ static void set_mesh_power(struct mesh_state *ms, int state)
1757 pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0); 1756 pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
1758 msleep(10); 1757 msleep(10);
1759 } 1758 }
1760} 1759}
1761 1760
1762 1761
1763#ifdef CONFIG_PM 1762#ifdef CONFIG_PM
1764static int mesh_suspend(struct macio_dev *mdev, pm_message_t state) 1763static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1765{ 1764{
1766 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); 1765 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1767 unsigned long flags; 1766 unsigned long flags;
1768 1767
1769 if (state.event == mdev->ofdev.dev.power.power_state.event || state.event < 2) 1768 switch (mesg.event) {
1769 case PM_EVENT_SUSPEND:
1770 case PM_EVENT_FREEZE:
1771 break;
1772 default:
1773 return 0;
1774 }
1775 if (mesg.event == mdev->ofdev.dev.power.power_state.event)
1770 return 0; 1776 return 0;
1771 1777
1772 scsi_block_requests(ms->host); 1778 scsi_block_requests(ms->host);
@@ -1781,7 +1787,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t state)
1781 disable_irq(ms->meshintr); 1787 disable_irq(ms->meshintr);
1782 set_mesh_power(ms, 0); 1788 set_mesh_power(ms, 0);
1783 1789
1784 mdev->ofdev.dev.power.power_state = state; 1790 mdev->ofdev.dev.power.power_state = mesg;
1785 1791
1786 return 0; 1792 return 0;
1787} 1793}
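
The rewritten suspend path replaces the opaque `state.event < 2` test with an explicit whitelist: only genuine suspend and freeze messages power the chip down, and a repeat of the current power state is still a no-op. A sketch of the event filter:

#include <linux/pm.h>

/* returns nonzero only for messages that should power hardware down */
static int example_should_suspend(pm_message_t mesg)
{
	switch (mesg.event) {
	case PM_EVENT_SUSPEND:
	case PM_EVENT_FREEZE:
		return 1;
	default:
		return 0;
	}
}
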
@@ -1851,7 +1857,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1851{ 1857{
1852 struct device_node *mesh = macio_get_of_node(mdev); 1858 struct device_node *mesh = macio_get_of_node(mdev);
1853 struct pci_dev* pdev = macio_get_pci_dev(mdev); 1859 struct pci_dev* pdev = macio_get_pci_dev(mdev);
1854 int tgt, *cfp, minper; 1860 int tgt, minper;
1861 const int *cfp;
1855 struct mesh_state *ms; 1862 struct mesh_state *ms;
1856 struct Scsi_Host *mesh_host; 1863 struct Scsi_Host *mesh_host;
1857 void *dma_cmd_space; 1864 void *dma_cmd_space;
@@ -1940,7 +1947,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1940 ms->tgts[tgt].current_req = NULL; 1947 ms->tgts[tgt].current_req = NULL;
1941 } 1948 }
1942 1949
1943 if ((cfp = (int *) get_property(mesh, "clock-frequency", NULL))) 1950 if ((cfp = get_property(mesh, "clock-frequency", NULL)))
1944 ms->clk_freq = *cfp; 1951 ms->clk_freq = *cfp;
1945 else { 1952 else {
1946 printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n"); 1953 printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index cb367c2c5c78..9b991b746d1e 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -29,7 +29,7 @@ static irqreturn_t mvme147_intr (int irq, void *dummy, struct pt_regs *fp)
29 return IRQ_HANDLED; 29 return IRQ_HANDLED;
30} 30}
31 31
32static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 32static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
33{ 33{
34 unsigned char flags = 0x01; 34 unsigned char flags = 0x01;
35 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 35 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -57,7 +57,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
57 return 0; 57 return 0;
58} 58}
59 59
60static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 60static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
61 int status) 61 int status)
62{ 62{
63 m147_pcc->dma_cntrl = 0; 63 m147_pcc->dma_cntrl = 0;
@@ -112,7 +112,7 @@ int mvme147_detect(struct scsi_host_template *tpnt)
112 return 0; 112 return 0;
113} 113}
114 114
115static int mvme147_bus_reset(Scsi_Cmnd *cmd) 115static int mvme147_bus_reset(struct scsi_cmnd *cmd)
116{ 116{
117 /* FIXME perform bus-specific reset */ 117 /* FIXME perform bus-specific reset */
118 118
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 2f56d69bd180..32aee85434d8 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -12,10 +12,6 @@
12 12
13int mvme147_detect(struct scsi_host_template *); 13int mvme147_detect(struct scsi_host_template *);
14int mvme147_release(struct Scsi_Host *); 14int mvme147_release(struct Scsi_Host *);
15const char *wd33c93_info(void);
16int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
17int wd33c93_abort(Scsi_Cmnd *);
18int wd33c93_reset(Scsi_Cmnd *, unsigned int);
19 15
20#ifndef CMD_PER_LUN 16#ifndef CMD_PER_LUN
21#define CMD_PER_LUN 2 17#define CMD_PER_LUN 2
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6ab035590ee6..b28712df0b77 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -5118,8 +5118,7 @@ static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp)
5118 cp->host_status &= ~HS_SKIPMASK; 5118 cp->host_status &= ~HS_SKIPMASK;
5119 cp->start.schedule.l_paddr = 5119 cp->start.schedule.l_paddr =
5120 cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); 5120 cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
5121 list_del(&cp->link_ccbq); 5121 list_move_tail(&cp->link_ccbq, &lp->skip_ccbq);
5122 list_add_tail(&cp->link_ccbq, &lp->skip_ccbq);
5123 if (cp->queued) { 5122 if (cp->queued) {
5124 --lp->queuedccbs; 5123 --lp->queuedccbs;
5125 } 5124 }
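
list_move_tail() collapses the delete/re-add pair into one helper: it unlinks the entry from whatever list it is on and appends it to the destination, so the two halves can never drift apart in later edits. Sketch with a hypothetical CCB-like entry:

#include <linux/list.h>

struct example_ccb {
	struct list_head link_ccbq;	/* hypothetical list linkage */
};

static void example_skip(struct example_ccb *cp, struct list_head *skip_q)
{
	/* same as list_del(&cp->link_ccbq); list_add_tail(..., skip_q); */
	list_move_tail(&cp->link_ccbq, skip_q);
}
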
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index 0e4e46a01336..78818b6684f8 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -53,10 +53,8 @@
53#ifndef NCR53C8XX_H 53#ifndef NCR53C8XX_H
54#define NCR53C8XX_H 54#define NCR53C8XX_H
55 55
56#include <linux/config.h>
57#include <scsi/scsi_host.h> 56#include <scsi/scsi_host.h>
58 57
59#include <linux/config.h>
60 58
61/* 59/*
62** If you want a driver as small as possible, do not define the 60** If you want a driver as small as possible, do not define the
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 5c55e152e718..bfb4f49e125d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2866,8 +2866,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2866 */ 2866 */
2867 nsp32_do_bus_reset(data); 2867 nsp32_do_bus_reset(data);
2868 2868
2869 ret = request_irq(host->irq, do_nsp32_isr, 2869 ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
2870 SA_SHIRQ | SA_SAMPLE_RANDOM, "nsp32", data);
2871 if (ret < 0) { 2870 if (ret < 0) {
2872 nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 " 2871 nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
2873 "SCSI PCI controller. Interrupt: %d", host->irq); 2872 "SCSI PCI controller. Interrupt: %d", host->irq);
@@ -2886,12 +2885,19 @@ static int nsp32_detect(struct scsi_host_template *sht)
2886 } 2885 }
2887 2886
2888#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73)) 2887#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
2889 scsi_add_host (host, &PCIDEV->dev); 2888 ret = scsi_add_host(host, &PCIDEV->dev);
2889 if (ret) {
2890 nsp32_msg(KERN_ERR, "failed to add scsi host");
2891 goto free_region;
2892 }
2890 scsi_scan_host(host); 2893 scsi_scan_host(host);
2891#endif 2894#endif
2892 pci_set_drvdata(PCIDEV, host); 2895 pci_set_drvdata(PCIDEV, host);
2893 return DETECT_OK; 2896 return DETECT_OK;
2894 2897
2898 free_region:
2899 release_region(host->io_port, host->n_io_port);
2900
2895 free_irq: 2901 free_irq:
2896 free_irq(host->irq, data); 2902 free_irq(host->irq, data);
2897 2903
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index dee426f8c07b..dd67a68c5c23 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -6,7 +6,6 @@
6 * Based on cyber_esp.c 6 * Based on cyber_esp.c
7 */ 7 */
8 8
9#include <linux/config.h>
10 9
11#if defined(CONFIG_AMIGA) || defined(CONFIG_APUS) 10#if defined(CONFIG_AMIGA) || defined(CONFIG_APUS)
12#define USE_BOTTOM_HALF 11#define USE_BOTTOM_HALF
@@ -198,7 +197,7 @@ int oktagon_esp_detect(struct scsi_host_template *tpnt)
198 esp->esp_command_dvma = (__u32) cmd_buffer; 197 esp->esp_command_dvma = (__u32) cmd_buffer;
199 198
200 esp->irq = IRQ_AMIGA_PORTS; 199 esp->irq = IRQ_AMIGA_PORTS;
201 request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ, 200 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
202 "BSC Oktagon SCSI", esp->ehost); 201 "BSC Oktagon SCSI", esp->ehost);
203 202
204 /* Figure out our scsi ID on the bus */ 203 /* Figure out our scsi ID on the bus */
diff --git a/drivers/scsi/oktagon_io.S b/drivers/scsi/oktagon_io.S
index 08ce8d80d8f5..8a7340b02707 100644
--- a/drivers/scsi/oktagon_io.S
+++ b/drivers/scsi/oktagon_io.S
@@ -23,7 +23,6 @@ int oktag_from_io(long *addr,long *paddr,long len)
23 * is moved to/from the IO register. 23 * is moved to/from the IO register.
24 */ 24 */
25 25
26#include <linux/config.h>
27 26
28#ifdef CONFIG_APUS 27#ifdef CONFIG_APUS
29 28
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
index 011d4d6ca9f9..1e426f5d0ed8 100644
--- a/drivers/scsi/osst.h
+++ b/drivers/scsi/osst.h
@@ -3,7 +3,6 @@
3 */ 3 */
4 4
5#include <asm/byteorder.h> 5#include <asm/byteorder.h>
6#include <linux/config.h>
7#include <linux/completion.h> 6#include <linux/completion.h>
8 7
9/* FIXME - rename and use the following two types or delete them! 8/* FIXME - rename and use the following two types or delete them!
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 1bf96ed8f935..1434209a8ac5 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -454,7 +454,7 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
454 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); 454 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
455 455
456 if (instance->irq != SCSI_IRQ_NONE) 456 if (instance->irq != SCSI_IRQ_NONE)
457 if (request_irq(instance->irq, pas16_intr, SA_INTERRUPT, "pas16", instance)) { 457 if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) {
458 printk("scsi%d : IRQ%d not free, interrupts disabled\n", 458 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
459 instance->host_no, instance->irq); 459 instance->host_no, instance->irq);
460 instance->irq = SCSI_IRQ_NONE; 460 instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 231f9c311c69..0d4c04e1f3de 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1623,7 +1623,7 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1623 /* Interrupt handler */ 1623 /* Interrupt handler */
1624 link->irq.Handler = &nspintr; 1624 link->irq.Handler = &nspintr;
1625 link->irq.Instance = info; 1625 link->irq.Instance = info;
1626 link->irq.Attributes |= (SA_SHIRQ | SA_SAMPLE_RANDOM); 1626 link->irq.Attributes |= IRQF_SHARED;
1627 1627
1628 /* General socket configuration */ 1628 /* General socket configuration */
1629 link->conf.Attributes = CONF_ENABLE_IRQ; 1629 link->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 9f59827707f0..0b65099acb1a 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -799,7 +799,7 @@ next_entry:
799 data = (struct sym53c500_data *)host->hostdata; 799 data = (struct sym53c500_data *)host->hostdata;
800 800
801 if (irq_level > 0) { 801 if (irq_level > 0) {
802 if (request_irq(irq_level, SYM53C500_intr, SA_SHIRQ, "SYM53C500", host)) { 802 if (request_irq(irq_level, SYM53C500_intr, IRQF_SHARED, "SYM53C500", host)) {
803 printk("SYM53C500: unable to allocate IRQ %d\n", irq_level); 803 printk("SYM53C500: unable to allocate IRQ %d\n", irq_level);
804 goto err_free_scsi; 804 goto err_free_scsi;
805 } 805 }
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
deleted file mode 100644
index 7ebe8e03aa96..000000000000
--- a/drivers/scsi/pdc_adma.c
+++ /dev/null
@@ -1,739 +0,0 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.04"
50
51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53
54/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
56
57enum {
58 ADMA_PORTS = 2,
59 ADMA_CPB_BYTES = 40,
60 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
61 ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
62
63 ADMA_DMA_BOUNDARY = 0xffffffff,
64
65 /* global register offsets */
66 ADMA_MODE_LOCK = 0x00c7,
67
68 /* per-channel register offsets */
69 ADMA_CONTROL = 0x0000, /* ADMA control */
70 ADMA_STATUS = 0x0002, /* ADMA status */
71 ADMA_CPB_COUNT = 0x0004, /* CPB count */
72 ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
73 ADMA_CPB_NEXT = 0x000c, /* next CPB address */
74 ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
75 ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
76 ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
77
78 /* ADMA_CONTROL register bits */
79 aNIEN = (1 << 8), /* irq mask: 1==masked */
80 aGO = (1 << 7), /* packet trigger ("Go!") */
81 aRSTADM = (1 << 5), /* ADMA logic reset */
82 aPIOMD4 = 0x0003, /* PIO mode 4 */
83
84 /* ADMA_STATUS register bits */
85 aPSD = (1 << 6),
86 aUIRQ = (1 << 4),
87 aPERR = (1 << 0),
88
89 /* CPB bits */
90 cDONE = (1 << 0),
91 cVLD = (1 << 0),
92 cDAT = (1 << 2),
93 cIEN = (1 << 3),
94
95 /* PRD bits */
96 pORD = (1 << 4),
97 pDIRO = (1 << 5),
98 pEND = (1 << 7),
99
100 /* ATA register flags */
101 rIGN = (1 << 5),
102 rEND = (1 << 7),
103
104 /* ATA register addresses */
105 ADMA_REGS_CONTROL = 0x0e,
106 ADMA_REGS_SECTOR_COUNT = 0x12,
107 ADMA_REGS_LBA_LOW = 0x13,
108 ADMA_REGS_LBA_MID = 0x14,
109 ADMA_REGS_LBA_HIGH = 0x15,
110 ADMA_REGS_DEVICE = 0x16,
111 ADMA_REGS_COMMAND = 0x17,
112
113 /* PCI device IDs */
114 board_1841_idx = 0, /* ADMA 2-port controller */
115};
116
117typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
118
119struct adma_port_priv {
120 u8 *pkt;
121 dma_addr_t pkt_dma;
122 adma_state_t state;
123};
124
125static int adma_ata_init_one (struct pci_dev *pdev,
126 const struct pci_device_id *ent);
127static irqreturn_t adma_intr (int irq, void *dev_instance,
128 struct pt_regs *regs);
129static int adma_port_start(struct ata_port *ap);
130static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap);
138static void adma_irq_clear(struct ata_port *ap);
139static void adma_eng_timeout(struct ata_port *ap);
140
141static struct scsi_host_template adma_ata_sht = {
142 .module = THIS_MODULE,
143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd,
146 .can_queue = ATA_DEF_QUEUE,
147 .this_id = ATA_SHT_THIS_ID,
148 .sg_tablesize = LIBATA_MAX_PRD,
149 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
150 .emulated = ATA_SHT_EMULATED,
151 .use_clustering = ENABLE_CLUSTERING,
152 .proc_name = DRV_NAME,
153 .dma_boundary = ADMA_DMA_BOUNDARY,
154 .slave_configure = ata_scsi_slave_config,
155 .slave_destroy = ata_scsi_slave_destroy,
156 .bios_param = ata_std_bios_param,
157};
158
159static const struct ata_port_operations adma_ata_ops = {
160 .port_disable = ata_port_disable,
161 .tf_load = ata_tf_load,
162 .tf_read = ata_tf_read,
163 .check_status = ata_check_status,
164 .check_atapi_dma = adma_check_atapi_dma,
165 .exec_command = ata_exec_command,
166 .dev_select = ata_std_dev_select,
167 .phy_reset = adma_phy_reset,
168 .qc_prep = adma_qc_prep,
169 .qc_issue = adma_qc_issue,
170 .eng_timeout = adma_eng_timeout,
171 .data_xfer = ata_mmio_data_xfer,
172 .irq_handler = adma_intr,
173 .irq_clear = adma_irq_clear,
174 .port_start = adma_port_start,
175 .port_stop = adma_port_stop,
176 .host_stop = adma_host_stop,
177 .bmdma_stop = adma_bmdma_stop,
178 .bmdma_status = adma_bmdma_status,
179};
180
181static struct ata_port_info adma_port_info[] = {
182 /* board_1841_idx */
183 {
184 .sht = &adma_ata_sht,
185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
187 .pio_mask = 0x10, /* pio4 */
188 .udma_mask = 0x1f, /* udma0-4 */
189 .port_ops = &adma_ata_ops,
190 },
191};
192
193static const struct pci_device_id adma_ata_pci_tbl[] = {
194 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
195 board_1841_idx },
196
197 { } /* terminate list */
198};
199
200static struct pci_driver adma_ata_pci_driver = {
201 .name = DRV_NAME,
202 .id_table = adma_ata_pci_tbl,
203 .probe = adma_ata_init_one,
204 .remove = ata_pci_remove_one,
205};
206
207static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
208{
209 return 1; /* ATAPI DMA not yet supported */
210}
211
212static void adma_bmdma_stop(struct ata_queued_cmd *qc)
213{
214 /* nothing */
215}
216
217static u8 adma_bmdma_status(struct ata_port *ap)
218{
219 return 0;
220}
221
222static void adma_irq_clear(struct ata_port *ap)
223{
224 /* nothing */
225}
226
227static void adma_reset_engine(void __iomem *chan)
228{
229 /* reset ADMA to idle state */
230 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
231 udelay(2);
232 writew(aPIOMD4, chan + ADMA_CONTROL);
233 udelay(2);
234}
235
236static void adma_reinit_engine(struct ata_port *ap)
237{
238 struct adma_port_priv *pp = ap->private_data;
239 void __iomem *mmio_base = ap->host_set->mmio_base;
240 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
241
242 /* mask/clear ATA interrupts */
243 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
244 ata_check_status(ap);
245
246 /* reset the ADMA engine */
247 adma_reset_engine(chan);
248
249 /* set in-FIFO threshold to 0x100 */
250 writew(0x100, chan + ADMA_FIFO_IN);
251
252 /* set CPB pointer */
253 writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
254
255 /* set out-FIFO threshold to 0x100 */
256 writew(0x100, chan + ADMA_FIFO_OUT);
257
258 /* set CPB count */
259 writew(1, chan + ADMA_CPB_COUNT);
260
261 /* read/discard ADMA status */
262 readb(chan + ADMA_STATUS);
263}
264
265static inline void adma_enter_reg_mode(struct ata_port *ap)
266{
267 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
268
269 writew(aPIOMD4, chan + ADMA_CONTROL);
270 readb(chan + ADMA_STATUS); /* flush */
271}
272
273static void adma_phy_reset(struct ata_port *ap)
274{
275 struct adma_port_priv *pp = ap->private_data;
276
277 pp->state = adma_state_idle;
278 adma_reinit_engine(ap);
279 ata_port_probe(ap);
280 ata_bus_reset(ap);
281}
282
283static void adma_eng_timeout(struct ata_port *ap)
284{
285 struct adma_port_priv *pp = ap->private_data;
286
287 if (pp->state != adma_state_idle) /* healthy paranoia */
288 pp->state = adma_state_mmio;
289 adma_reinit_engine(ap);
290 ata_eng_timeout(ap);
291}
292
293static int adma_fill_sg(struct ata_queued_cmd *qc)
294{
295 struct scatterlist *sg;
296 struct ata_port *ap = qc->ap;
297 struct adma_port_priv *pp = ap->private_data;
298 u8 *buf = pp->pkt;
299 int i = (2 + buf[3]) * 8;
300 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
301
302 ata_for_each_sg(sg, qc) {
303 u32 addr;
304 u32 len;
305
306 addr = (u32)sg_dma_address(sg);
307 *(__le32 *)(buf + i) = cpu_to_le32(addr);
308 i += 4;
309
310 len = sg_dma_len(sg) >> 3;
311 *(__le32 *)(buf + i) = cpu_to_le32(len);
312 i += 4;
313
314 if (ata_sg_is_last(sg, qc))
315 pFLAGS |= pEND;
316 buf[i++] = pFLAGS;
317 buf[i++] = qc->dev->dma_mode & 0xf;
318 buf[i++] = 0; /* pPKLW */
319 buf[i++] = 0; /* reserved */
320
321 *(__le32 *)(buf + i)
322 = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
323 i += 4;
324
325 VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
326 (unsigned long)addr, len);
327 }
328 return i;
329}
330
331static void adma_qc_prep(struct ata_queued_cmd *qc)
332{
333 struct adma_port_priv *pp = qc->ap->private_data;
334 u8 *buf = pp->pkt;
335 u32 pkt_dma = (u32)pp->pkt_dma;
336 int i = 0;
337
338 VPRINTK("ENTER\n");
339
340 adma_enter_reg_mode(qc->ap);
341 if (qc->tf.protocol != ATA_PROT_DMA) {
342 ata_qc_prep(qc);
343 return;
344 }
345
346 buf[i++] = 0; /* Response flags */
347 buf[i++] = 0; /* reserved */
348 buf[i++] = cVLD | cDAT | cIEN;
349 i++; /* cLEN, gets filled in below */
350
351 *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
352 i += 4; /* cNCPB */
353 i += 4; /* cPRD, gets filled in below */
354
355 buf[i++] = 0; /* reserved */
356 buf[i++] = 0; /* reserved */
357 buf[i++] = 0; /* reserved */
358 buf[i++] = 0; /* reserved */
359
360 /* ATA registers; must be a multiple of 4 */
361 buf[i++] = qc->tf.device;
362 buf[i++] = ADMA_REGS_DEVICE;
363 if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
364 buf[i++] = qc->tf.hob_nsect;
365 buf[i++] = ADMA_REGS_SECTOR_COUNT;
366 buf[i++] = qc->tf.hob_lbal;
367 buf[i++] = ADMA_REGS_LBA_LOW;
368 buf[i++] = qc->tf.hob_lbam;
369 buf[i++] = ADMA_REGS_LBA_MID;
370 buf[i++] = qc->tf.hob_lbah;
371 buf[i++] = ADMA_REGS_LBA_HIGH;
372 }
373 buf[i++] = qc->tf.nsect;
374 buf[i++] = ADMA_REGS_SECTOR_COUNT;
375 buf[i++] = qc->tf.lbal;
376 buf[i++] = ADMA_REGS_LBA_LOW;
377 buf[i++] = qc->tf.lbam;
378 buf[i++] = ADMA_REGS_LBA_MID;
379 buf[i++] = qc->tf.lbah;
380 buf[i++] = ADMA_REGS_LBA_HIGH;
381 buf[i++] = 0;
382 buf[i++] = ADMA_REGS_CONTROL;
383 buf[i++] = rIGN;
384 buf[i++] = 0;
385 buf[i++] = qc->tf.command;
386 buf[i++] = ADMA_REGS_COMMAND | rEND;
387
388 buf[3] = (i >> 3) - 2; /* cLEN */
389 *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
390
391 i = adma_fill_sg(qc);
392 wmb(); /* flush PRDs and pkt to memory */
393#if 0
394 /* dump out CPB + PRDs for debug */
395 {
396 int j, len = 0;
397 static char obuf[2048];
398 for (j = 0; j < i; ++j) {
399 len += sprintf(obuf+len, "%02x ", buf[j]);
400 if ((j & 7) == 7) {
401 printk("%s\n", obuf);
402 len = 0;
403 }
404 }
405 if (len)
406 printk("%s\n", obuf);
407 }
408#endif
409}
410
411static inline void adma_packet_start(struct ata_queued_cmd *qc)
412{
413 struct ata_port *ap = qc->ap;
414 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
415
416 VPRINTK("ENTER, ap %p\n", ap);
417
418 /* fire up the ADMA engine */
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420}
421
422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{
424 struct adma_port_priv *pp = qc->ap->private_data;
425
426 switch (qc->tf.protocol) {
427 case ATA_PROT_DMA:
428 pp->state = adma_state_pkt;
429 adma_packet_start(qc);
430 return 0;
431
432 case ATA_PROT_ATAPI_DMA:
433 BUG();
434 break;
435
436 default:
437 break;
438 }
439
440 pp->state = adma_state_mmio;
441 return ata_qc_issue_prot(qc);
442}
443
444static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
445{
446 unsigned int handled = 0, port_no;
447 u8 __iomem *mmio_base = host_set->mmio_base;
448
449 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
450 struct ata_port *ap = host_set->ports[port_no];
451 struct adma_port_priv *pp;
452 struct ata_queued_cmd *qc;
453 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
454 u8 status = readb(chan + ADMA_STATUS);
455
456 if (status == 0)
457 continue;
458 handled = 1;
459 adma_enter_reg_mode(ap);
460 if (ap->flags & ATA_FLAG_DISABLED)
461 continue;
462 pp = ap->private_data;
463 if (!pp || pp->state != adma_state_pkt)
464 continue;
465 qc = ata_qc_from_tag(ap, ap->active_tag);
466 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
467 if ((status & (aPERR | aPSD | aUIRQ)))
468 qc->err_mask |= AC_ERR_OTHER;
469 else if (pp->pkt[0] != cDONE)
470 qc->err_mask |= AC_ERR_OTHER;
471
472 ata_qc_complete(qc);
473 }
474 }
475 return handled;
476}
477
478static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
479{
480 unsigned int handled = 0, port_no;
481
482 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
483 struct ata_port *ap;
484 ap = host_set->ports[port_no];
485 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
486 struct ata_queued_cmd *qc;
487 struct adma_port_priv *pp = ap->private_data;
488 if (!pp || pp->state != adma_state_mmio)
489 continue;
490 qc = ata_qc_from_tag(ap, ap->active_tag);
491 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
492
493 /* check main status, clearing INTRQ */
494 u8 status = ata_check_status(ap);
495 if ((status & ATA_BUSY))
496 continue;
497 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
498 ap->id, qc->tf.protocol, status);
499
500 /* complete taskfile transaction */
501 pp->state = adma_state_idle;
502 qc->err_mask |= ac_err_mask(status);
503 ata_qc_complete(qc);
504 handled = 1;
505 }
506 }
507 }
508 return handled;
509}
510
511static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
512{
513 struct ata_host_set *host_set = dev_instance;
514 unsigned int handled = 0;
515
516 VPRINTK("ENTER\n");
517
518 spin_lock(&host_set->lock);
519 handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
520 spin_unlock(&host_set->lock);
521
522 VPRINTK("EXIT\n");
523
524 return IRQ_RETVAL(handled);
525}
526
527static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
528{
529 port->cmd_addr =
530 port->data_addr = base + 0x000;
531 port->error_addr =
532 port->feature_addr = base + 0x004;
533 port->nsect_addr = base + 0x008;
534 port->lbal_addr = base + 0x00c;
535 port->lbam_addr = base + 0x010;
536 port->lbah_addr = base + 0x014;
537 port->device_addr = base + 0x018;
538 port->status_addr =
539 port->command_addr = base + 0x01c;
540 port->altstatus_addr =
541 port->ctl_addr = base + 0x038;
542}
543
544static int adma_port_start(struct ata_port *ap)
545{
546 struct device *dev = ap->host_set->dev;
547 struct adma_port_priv *pp;
548 int rc;
549
550 rc = ata_port_start(ap);
551 if (rc)
552 return rc;
553 adma_enter_reg_mode(ap);
554 rc = -ENOMEM;
555 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
556 if (!pp)
557 goto err_out;
558 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
559 GFP_KERNEL);
560 if (!pp->pkt)
561 goto err_out_kfree;
562 /* paranoia? */
563 if ((pp->pkt_dma & 7) != 0) {
564 printk("bad alignment for pp->pkt_dma: %08x\n",
565 (u32)pp->pkt_dma);
566 dma_free_coherent(dev, ADMA_PKT_BYTES,
567 pp->pkt, pp->pkt_dma);
568 goto err_out_kfree;
569 }
570 memset(pp->pkt, 0, ADMA_PKT_BYTES);
571 ap->private_data = pp;
572 adma_reinit_engine(ap);
573 return 0;
574
575err_out_kfree:
576 kfree(pp);
577err_out:
578 ata_port_stop(ap);
579 return rc;
580}
581
582static void adma_port_stop(struct ata_port *ap)
583{
584 struct device *dev = ap->host_set->dev;
585 struct adma_port_priv *pp = ap->private_data;
586
587 adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
588 if (pp != NULL) {
589 ap->private_data = NULL;
590 if (pp->pkt != NULL)
591 dma_free_coherent(dev, ADMA_PKT_BYTES,
592 pp->pkt, pp->pkt_dma);
593 kfree(pp);
594 }
595 ata_port_stop(ap);
596}
597
598static void adma_host_stop(struct ata_host_set *host_set)
599{
600 unsigned int port_no;
601
602 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
603 adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
604
605 ata_pci_host_stop(host_set);
606}
607
608static void adma_host_init(unsigned int chip_id,
609 struct ata_probe_ent *probe_ent)
610{
611 unsigned int port_no;
612 void __iomem *mmio_base = probe_ent->mmio_base;
613
614 /* enable/lock aGO operation */
615 writeb(7, mmio_base + ADMA_MODE_LOCK);
616
617 /* reset the ADMA logic */
618 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
619 adma_reset_engine(ADMA_REGS(mmio_base, port_no));
620}
621
622static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
623{
624 int rc;
625
626 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
627 if (rc) {
628 dev_printk(KERN_ERR, &pdev->dev,
629 "32-bit DMA enable failed\n");
630 return rc;
631 }
632 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
633 if (rc) {
634 dev_printk(KERN_ERR, &pdev->dev,
635 "32-bit consistent DMA enable failed\n");
636 return rc;
637 }
638 return 0;
639}
640
641static int adma_ata_init_one(struct pci_dev *pdev,
642 const struct pci_device_id *ent)
643{
644 static int printed_version;
645 struct ata_probe_ent *probe_ent = NULL;
646 void __iomem *mmio_base;
647 unsigned int board_idx = (unsigned int) ent->driver_data;
648 int rc, port_no;
649
650 if (!printed_version++)
651 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
652
653 rc = pci_enable_device(pdev);
654 if (rc)
655 return rc;
656
657 rc = pci_request_regions(pdev, DRV_NAME);
658 if (rc)
659 goto err_out;
660
661 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
662 rc = -ENODEV;
663 goto err_out_regions;
664 }
665
666 mmio_base = pci_iomap(pdev, 4, 0);
667 if (mmio_base == NULL) {
668 rc = -ENOMEM;
669 goto err_out_regions;
670 }
671
672 rc = adma_set_dma_masks(pdev, mmio_base);
673 if (rc)
674 goto err_out_iounmap;
675
676 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
677 if (probe_ent == NULL) {
678 rc = -ENOMEM;
679 goto err_out_iounmap;
680 }
681
682 probe_ent->dev = pci_dev_to_dev(pdev);
683 INIT_LIST_HEAD(&probe_ent->node);
684
685 probe_ent->sht = adma_port_info[board_idx].sht;
686 probe_ent->host_flags = adma_port_info[board_idx].host_flags;
687 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
688 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
689 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
690 probe_ent->port_ops = adma_port_info[board_idx].port_ops;
691
692 probe_ent->irq = pdev->irq;
693 probe_ent->irq_flags = SA_SHIRQ;
694 probe_ent->mmio_base = mmio_base;
695 probe_ent->n_ports = ADMA_PORTS;
696
697 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
698 adma_ata_setup_port(&probe_ent->port[port_no],
699 ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
700 }
701
702 pci_set_master(pdev);
703
704 /* initialize adapter */
705 adma_host_init(board_idx, probe_ent);
706
707 rc = ata_device_add(probe_ent);
708 kfree(probe_ent);
709 if (rc != ADMA_PORTS)
710 goto err_out_iounmap;
711 return 0;
712
713err_out_iounmap:
714 pci_iounmap(pdev, mmio_base);
715err_out_regions:
716 pci_release_regions(pdev);
717err_out:
718 pci_disable_device(pdev);
719 return rc;
720}
721
722static int __init adma_ata_init(void)
723{
724 return pci_module_init(&adma_ata_pci_driver);
725}
726
727static void __exit adma_ata_exit(void)
728{
729 pci_unregister_driver(&adma_ata_pci_driver);
730}
731
732MODULE_AUTHOR("Mark Lord");
733MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
734MODULE_LICENSE("GPL");
735MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
736MODULE_VERSION(DRV_VERSION);
737
738module_init(adma_ata_init);
739module_exit(adma_ata_exit);
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 83a671799934..0bd9c60e6455 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -13,7 +13,6 @@
13#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
14#include <linux/stat.h> 14#include <linux/stat.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/config.h>
17#ifdef CONFIG_KMOD 16#ifdef CONFIG_KMOD
18#include <linux/kmod.h> 17#include <linux/kmod.h>
19#endif 18#endif
@@ -170,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
170 SCpnt->request->rq_status = RQ_SCSI_BUSY; 169 SCpnt->request->rq_status = RQ_SCSI_BUSY;
171 170
172 SCpnt->done = pluto_detect_done; 171 SCpnt->done = pluto_detect_done;
173 SCpnt->bufflen = 256;
174 SCpnt->buffer = fcs[i].inquiry;
175 SCpnt->request_bufflen = 256; 172 SCpnt->request_bufflen = 256;
176 SCpnt->request_buffer = fcs[i].inquiry; 173 SCpnt->request_buffer = fcs[i].inquiry;
177 PLD(("set up %d %08lx\n", i, (long)SCpnt)) 174 PLD(("set up %d %08lx\n", i, (long)SCpnt))
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 108910f512e4..b0eba39f208a 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -6,11 +6,8 @@
6 * (c) 1995,1996 Grant R. Guenther, grant@torque.net, 6 * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
7 * under the terms of the GNU General Public License. 7 * under the terms of the GNU General Public License.
8 * 8 *
9 * Current Maintainer: David Campbell (Perth, Western Australia, GMT+0800)
10 * campbell@torque.net
11 */ 9 */
12 10
13#include <linux/config.h>
14#include <linux/init.h> 11#include <linux/init.h>
15#include <linux/kernel.h> 12#include <linux/kernel.h>
16#include <linux/module.h> 13#include <linux/module.h>
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index f6e1a1574bb8..7511df3588e4 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -2,7 +2,7 @@
2 * the Iomega ZIP drive 2 * the Iomega ZIP drive
3 * 3 *
4 * (c) 1996 Grant R. Guenther grant@torque.net 4 * (c) 1996 Grant R. Guenther grant@torque.net
5 * David Campbell campbell@torque.net 5 * David Campbell
6 * 6 *
7 * All comments to David. 7 * All comments to David.
8 */ 8 */
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 680f6063954b..8953991462d7 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -192,7 +192,7 @@
192 - Don't walk the entire list in qla1280_putq_t() just to directly 192 - Don't walk the entire list in qla1280_putq_t() just to directly
193 grab the pointer to the last element afterwards 193 grab the pointer to the last element afterwards
194 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen 194 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
195 - Don't use SA_INTERRUPT, its use is deprecated for this kind of driver 195 - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
196 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen 196 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
197 - Set dev->max_sectors to 1024 197 - Set dev->max_sectors to 1024
198 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen 198 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
@@ -331,7 +331,6 @@
331*****************************************************************************/ 331*****************************************************************************/
332 332
333 333
334#include <linux/config.h>
335#include <linux/module.h> 334#include <linux/module.h>
336 335
337#include <linux/version.h> 336#include <linux/version.h>
@@ -4210,7 +4209,7 @@ qla1280_setup(char *s)
4210} 4209}
4211 4210
4212 4211
4213static int 4212static int __init
4214qla1280_get_token(char *str) 4213qla1280_get_token(char *str)
4215{ 4214{
4216 char *sep; 4215 char *sep;
@@ -4370,7 +4369,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4370 /* Disable ISP interrupts. */ 4369 /* Disable ISP interrupts. */
4371 qla1280_disable_intrs(ha); 4370 qla1280_disable_intrs(ha);
4372 4371
4373 if (request_irq(pdev->irq, qla1280_intr_handler, SA_SHIRQ, 4372 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4374 "qla1280", ha)) { 4373 "qla1280", ha)) {
4375 printk("qla1280 : Failed to reserve interrupt %d already " 4374 printk("qla1280 : Failed to reserve interrupt %d already "
4376 "in use\n", pdev->irq); 4375 "in use\n", pdev->irq);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index e96d58ded57c..87f90c4f08e9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -16,15 +16,16 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
16{ 16{
17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
18 struct device, kobj))); 18 struct device, kobj)));
19 char *rbuf = (char *)ha->fw_dump;
19 20
20 if (ha->fw_dump_reading == 0) 21 if (ha->fw_dump_reading == 0)
21 return 0; 22 return 0;
22 if (off > ha->fw_dump_buffer_len) 23 if (off > ha->fw_dump_len)
23 return 0; 24 return 0;
24 if (off + count > ha->fw_dump_buffer_len) 25 if (off + count > ha->fw_dump_len)
25 count = ha->fw_dump_buffer_len - off; 26 count = ha->fw_dump_len - off;
26 27
27 memcpy(buf, &ha->fw_dump_buffer[off], count); 28 memcpy(buf, &rbuf[off], count);
28 29
29 return (count); 30 return (count);
30} 31}
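
With the dump exported raw instead of pre-rendered ASCII, the read handler reduces to the standard binary-sysfs windowing: clamp the requested (off, count) window to the buffer length, then copy. A sketch of just that clamp:

#include <linux/types.h>
#include <linux/string.h>

static ssize_t example_bin_read(char *dst, const char *src, size_t src_len,
				loff_t off, size_t count)
{
	if (off > src_len)
		return 0;		/* window starts past the end */
	if (off + count > src_len)
		count = src_len - off;	/* trim window to the buffer */
	memcpy(dst, src + off, count);
	return count;
}
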
@@ -36,7 +37,6 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
36 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 37 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
37 struct device, kobj))); 38 struct device, kobj)));
38 int reading; 39 int reading;
39 uint32_t dump_size;
40 40
41 if (off != 0) 41 if (off != 0)
42 return (0); 42 return (0);
@@ -44,46 +44,27 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
44 reading = simple_strtol(buf, NULL, 10); 44 reading = simple_strtol(buf, NULL, 10);
45 switch (reading) { 45 switch (reading) {
46 case 0: 46 case 0:
47 if (ha->fw_dump_reading == 1) { 47 if (!ha->fw_dump_reading)
48 qla_printk(KERN_INFO, ha, 48 break;
49 "Firmware dump cleared on (%ld).\n", ha->host_no);
50 49
51 vfree(ha->fw_dump_buffer); 50 qla_printk(KERN_INFO, ha,
52 ha->fw_dump_buffer = NULL; 51 "Firmware dump cleared on (%ld).\n", ha->host_no);
53 ha->fw_dump_reading = 0; 52
54 ha->fw_dumped = 0; 53 ha->fw_dump_reading = 0;
55 } 54 ha->fw_dumped = 0;
56 break; 55 break;
57 case 1: 56 case 1:
58 if (ha->fw_dumped && !ha->fw_dump_reading) { 57 if (ha->fw_dumped && !ha->fw_dump_reading) {
59 ha->fw_dump_reading = 1; 58 ha->fw_dump_reading = 1;
60 59
61 if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
62 dump_size = FW_DUMP_SIZE_24XX;
63 else {
64 dump_size = FW_DUMP_SIZE_1M;
65 if (ha->fw_memory_size < 0x20000)
66 dump_size = FW_DUMP_SIZE_128K;
67 else if (ha->fw_memory_size < 0x80000)
68 dump_size = FW_DUMP_SIZE_512K;
69 }
70 ha->fw_dump_buffer = (char *)vmalloc(dump_size);
71 if (ha->fw_dump_buffer == NULL) {
72 qla_printk(KERN_WARNING, ha,
73 "Unable to allocate memory for firmware "
74 "dump buffer (%d).\n", dump_size);
75
76 ha->fw_dump_reading = 0;
77 return (count);
78 }
79 qla_printk(KERN_INFO, ha, 60 qla_printk(KERN_INFO, ha,
80 "Firmware dump ready for read on (%ld).\n", 61 "Raw firmware dump ready for read on (%ld).\n",
81 ha->host_no); 62 ha->host_no);
82 memset(ha->fw_dump_buffer, 0, dump_size);
83 ha->isp_ops.ascii_fw_dump(ha);
84 ha->fw_dump_buffer_len = strlen(ha->fw_dump_buffer);
85 } 63 }
86 break; 64 break;
65 case 2:
66 qla2x00_alloc_fw_dump(ha);
67 break;
87 } 68 }
88 return (count); 69 return (count);
89} 70}
@@ -313,9 +294,6 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, char *buf, loff_t off,
313 if (!capable(CAP_SYS_ADMIN) || off != 0) 294 if (!capable(CAP_SYS_ADMIN) || off != 0)
314 return 0; 295 return 0;
315 296
316 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
317 return -ENOTSUPP;
318
319 /* Read NVRAM. */ 297 /* Read NVRAM. */
320 spin_lock_irqsave(&ha->hardware_lock, flags); 298 spin_lock_irqsave(&ha->hardware_lock, flags);
321 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size); 299 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size);
@@ -335,9 +313,6 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, char *buf, loff_t off,
335 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) 313 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
336 return 0; 314 return 0;
337 315
338 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
339 return -ENOTSUPP;
340
341 /* Write NVRAM. */ 316 /* Write NVRAM. */
342 spin_lock_irqsave(&ha->hardware_lock, flags); 317 spin_lock_irqsave(&ha->hardware_lock, flags);
343 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 318 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
@@ -357,6 +332,53 @@ static struct bin_attribute sysfs_vpd_attr = {
357 .write = qla2x00_sysfs_write_vpd, 332 .write = qla2x00_sysfs_write_vpd,
358}; 333};
359 334
335static ssize_t
336qla2x00_sysfs_read_sfp(struct kobject *kobj, char *buf, loff_t off,
337 size_t count)
338{
339 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
340 struct device, kobj)));
341 uint16_t iter, addr, offset;
342 int rval;
343
344 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
345 return 0;
346
347 addr = 0xa0;
348 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
349 iter++, offset += SFP_BLOCK_SIZE) {
350 if (iter == 4) {
351 /* Skip to next device address. */
352 addr = 0xa2;
353 offset = 0;
354 }
355
356 rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset,
357 SFP_BLOCK_SIZE);
358 if (rval != QLA_SUCCESS) {
359 qla_printk(KERN_WARNING, ha,
360 "Unable to read SFP data (%x/%x/%x).\n", rval,
361 addr, offset);
362 count = 0;
363 break;
364 }
365 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
366 buf += SFP_BLOCK_SIZE;
367 }
368
369 return count;
370}
371
372static struct bin_attribute sysfs_sfp_attr = {
373 .attr = {
374 .name = "sfp",
375 .mode = S_IRUSR | S_IWUSR,
376 .owner = THIS_MODULE,
377 },
378 .size = SFP_DEV_SIZE * 2,
379 .read = qla2x00_sysfs_read_sfp,
380};
381
360void 382void
361qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 383qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
362{ 384{
@@ -367,7 +389,12 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
367 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 389 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
368 sysfs_create_bin_file(&host->shost_gendev.kobj, 390 sysfs_create_bin_file(&host->shost_gendev.kobj,
369 &sysfs_optrom_ctl_attr); 391 &sysfs_optrom_ctl_attr);
370 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_vpd_attr); 392 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
393 sysfs_create_bin_file(&host->shost_gendev.kobj,
394 &sysfs_vpd_attr);
395 sysfs_create_bin_file(&host->shost_gendev.kobj,
396 &sysfs_sfp_attr);
397 }
371} 398}
372 399
373void 400void
@@ -380,7 +407,12 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
380 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 407 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
381 sysfs_remove_bin_file(&host->shost_gendev.kobj, 408 sysfs_remove_bin_file(&host->shost_gendev.kobj,
382 &sysfs_optrom_ctl_attr); 409 &sysfs_optrom_ctl_attr);
383 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_vpd_attr); 410 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
411 sysfs_remove_bin_file(&host->shost_gendev.kobj,
412 &sysfs_vpd_attr);
413 sysfs_remove_bin_file(&host->shost_gendev.kobj,
414 &sysfs_sfp_attr);
415 }
384 416
385 if (ha->beacon_blink_led == 1) 417 if (ha->beacon_blink_led == 1)
386 ha->isp_ops.beacon_off(ha); 418 ha->isp_ops.beacon_off(ha);
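
The VPD and SFP attribute files are now created only on ISP24xx/54xx parts, and the removal path mirrors the exact same condition. That symmetry is the point: removing a file that was never created, or leaking one that was, are both easy bugs when the two sides test different things. Sketch with hypothetical attributes:

#include <linux/sysfs.h>

static struct bin_attribute example_vpd_attr;	/* hypothetical attrs */
static struct bin_attribute example_sfp_attr;

static void example_alloc_attrs(struct kobject *kobj, int has_vpd_sfp)
{
	if (has_vpd_sfp) {
		sysfs_create_bin_file(kobj, &example_vpd_attr);
		sysfs_create_bin_file(kobj, &example_sfp_attr);
	}
}

static void example_free_attrs(struct kobject *kobj, int has_vpd_sfp)
{
	if (has_vpd_sfp) {		/* must match the create-side test */
		sysfs_remove_bin_file(kobj, &example_vpd_attr);
		sysfs_remove_bin_file(kobj, &example_sfp_attr);
	}
}
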
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 74e54713aa7c..f6ed6962bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -8,7 +8,34 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10 10
11static int qla_uprintf(char **, char *, ...); 11static inline void
12qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
13{
14 fw_dump->fw_major_version = htonl(ha->fw_major_version);
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
16 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
17 fw_dump->fw_attributes = htonl(ha->fw_attributes);
18
19 fw_dump->vendor = htonl(ha->pdev->vendor);
20 fw_dump->device = htonl(ha->pdev->device);
21 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
22 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
23}
24
25static inline void *
26qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
27{
28 /* Request queue. */
29 memcpy(ptr, ha->request_ring, ha->request_q_length *
30 sizeof(request_t));
31
32 /* Response queue. */
33 ptr += ha->request_q_length * sizeof(request_t);
34 memcpy(ptr, ha->response_ring, ha->response_q_length *
35 sizeof(response_t));
36
37 return ptr + (ha->response_q_length * sizeof(response_t));
38}
12 39
13/** 40/**
14 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 41 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
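
From here on, every register word captured into the dump goes through htons()/htonl(), so the in-memory dump is big-endian regardless of host byte order and the raw blob read back through sysfs decodes identically on any machine; qla2xxx_copy_queues() then appends the request and response rings after the register blocks. A minimal sketch of the normalization, using the kernel's explicit-endian types:

#include <linux/types.h>
#include <asm/byteorder.h>

/* store a host-order register value big-endian in the dump buffer */
static inline void example_store_reg32(__be32 *dst, u32 val)
{
	*dst = cpu_to_be32(val);	/* same conversion htonl() performs */
}
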
@@ -49,10 +76,11 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
49 "request...\n", ha->fw_dump); 76 "request...\n", ha->fw_dump);
50 goto qla2300_fw_dump_failed; 77 goto qla2300_fw_dump_failed;
51 } 78 }
52 fw = ha->fw_dump; 79 fw = &ha->fw_dump->isp.isp23;
80 qla2xxx_prep_dump(ha, ha->fw_dump);
53 81
54 rval = QLA_SUCCESS; 82 rval = QLA_SUCCESS;
55 fw->hccr = RD_REG_WORD(&reg->hccr); 83 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
56 84
57 /* Pause RISC. */ 85 /* Pause RISC. */
58 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 86 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
@@ -73,85 +101,86 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
73 if (rval == QLA_SUCCESS) { 101 if (rval == QLA_SUCCESS) {
74 dmp_reg = (uint16_t __iomem *)(reg + 0); 102 dmp_reg = (uint16_t __iomem *)(reg + 0);
75 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 103 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
76 fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++); 104 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
77 105
78 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10); 106 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
79 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) 107 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
80 fw->risc_host_reg[cnt] = RD_REG_WORD(dmp_reg++); 108 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
81 109
82 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x40); 110 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x40);
83 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 111 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
84 fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++); 112 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
85 113
86 WRT_REG_WORD(&reg->ctrl_status, 0x40); 114 WRT_REG_WORD(&reg->ctrl_status, 0x40);
87 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 115 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
88 for (cnt = 0; cnt < sizeof(fw->resp_dma_reg) / 2; cnt++) 116 for (cnt = 0; cnt < sizeof(fw->resp_dma_reg) / 2; cnt++)
89 fw->resp_dma_reg[cnt] = RD_REG_WORD(dmp_reg++); 117 fw->resp_dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
90 118
91 WRT_REG_WORD(&reg->ctrl_status, 0x50); 119 WRT_REG_WORD(&reg->ctrl_status, 0x50);
92 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 120 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
93 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 121 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
94 fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++); 122 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
95 123
96 WRT_REG_WORD(&reg->ctrl_status, 0x00); 124 WRT_REG_WORD(&reg->ctrl_status, 0x00);
97 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0); 125 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
98 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 126 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
99 fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 127 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
100 128
101 WRT_REG_WORD(&reg->pcr, 0x2000); 129 WRT_REG_WORD(&reg->pcr, 0x2000);
102 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 130 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
103 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++) 131 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
104 fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++); 132 fw->risc_gp0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
105 133
106 WRT_REG_WORD(&reg->pcr, 0x2200); 134 WRT_REG_WORD(&reg->pcr, 0x2200);
107 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 135 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
108 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++) 136 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
109 fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++); 137 fw->risc_gp1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
110 138
111 WRT_REG_WORD(&reg->pcr, 0x2400); 139 WRT_REG_WORD(&reg->pcr, 0x2400);
112 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 140 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
113 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++) 141 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
114 fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++); 142 fw->risc_gp2_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
115 143
116 WRT_REG_WORD(&reg->pcr, 0x2600); 144 WRT_REG_WORD(&reg->pcr, 0x2600);
117 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 145 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
118 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++) 146 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
119 fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++); 147 fw->risc_gp3_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
120 148
121 WRT_REG_WORD(&reg->pcr, 0x2800); 149 WRT_REG_WORD(&reg->pcr, 0x2800);
122 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 150 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
123 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++) 151 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
124 fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++); 152 fw->risc_gp4_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
125 153
126 WRT_REG_WORD(&reg->pcr, 0x2A00); 154 WRT_REG_WORD(&reg->pcr, 0x2A00);
127 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 155 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
128 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++) 156 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
129 fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++); 157 fw->risc_gp5_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
130 158
131 WRT_REG_WORD(&reg->pcr, 0x2C00); 159 WRT_REG_WORD(&reg->pcr, 0x2C00);
132 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 160 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
133 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++) 161 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
134 fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++); 162 fw->risc_gp6_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
135 163
136 WRT_REG_WORD(&reg->pcr, 0x2E00); 164 WRT_REG_WORD(&reg->pcr, 0x2E00);
137 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 165 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
138 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++) 166 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
139 fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++); 167 fw->risc_gp7_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
140 168
141 WRT_REG_WORD(&reg->ctrl_status, 0x10); 169 WRT_REG_WORD(&reg->ctrl_status, 0x10);
142 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 170 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
143 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++) 171 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
144 fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 172 fw->frame_buf_hdw_reg[cnt] =
173 htons(RD_REG_WORD(dmp_reg++));
145 174
146 WRT_REG_WORD(&reg->ctrl_status, 0x20); 175 WRT_REG_WORD(&reg->ctrl_status, 0x20);
147 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 176 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
148 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++) 177 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
149 fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++); 178 fw->fpm_b0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
150 179
151 WRT_REG_WORD(&reg->ctrl_status, 0x30); 180 WRT_REG_WORD(&reg->ctrl_status, 0x30);
152 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 181 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
153 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++) 182 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
154 fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++); 183 fw->fpm_b1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
155 184
156 /* Reset RISC. */ 185 /* Reset RISC. */
157 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 186 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
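Note: every block in the hunk above is the same three-step pattern: select a register bank through ctrl_status or pcr, point at the 16-bit window at reg + 0x80, and latch the bank into the dump in big-endian form. A helper invented here for illustration (not an API this patch adds) would collapse the repetition:

static uint16_t *
qla2xxx_read_bank(uint16_t __iomem *select_reg, uint16_t bank,
    uint16_t __iomem *window, uint32_t count, uint16_t *buf)
{
	uint32_t cnt;

	/* Select the bank, then latch 'count' 16-bit registers,
	 * converting each to big-endian as it is stored. */
	WRT_REG_WORD(select_reg, bank);
	for (cnt = 0; cnt < count; cnt++)
		*buf++ = htons(RD_REG_WORD(window++));

	return buf;
}

Each WRT_REG_WORD(&reg->pcr, 0x2X00) block above then becomes one call, e.g. qla2xxx_read_bank(&reg->pcr, 0x2000, (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80), sizeof(fw->risc_gp0_reg) / 2, fw->risc_gp0_reg).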
@@ -226,7 +255,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
226 255
227 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 256 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
228 rval = mb0 & MBS_MASK; 257 rval = mb0 & MBS_MASK;
229 fw->risc_ram[cnt] = mb2; 258 fw->risc_ram[cnt] = htons(mb2);
230 } else { 259 } else {
231 rval = QLA_FUNCTION_FAILED; 260 rval = QLA_FUNCTION_FAILED;
232 } 261 }
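Note: this fragment and the two that follow are the completion half of a word-at-a-time RAM dump: each 16-bit word arrives in mailbox 2 after a read-RAM mailbox command completes, and is now stored big-endian like everything else. The issuing half sits outside the hunk; an illustrative reconstruction (the command code and register macros follow the driver's usual mailbox interface, but this exact loop is invented here):

	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; cnt++) {
		/* mb0 = command, mb1 = RAM address (assumed encoding) */
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		WRT_MAILBOX_REG(ha, reg, 1, risc_address + cnt);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		/* ... poll until the ISR latches mb0/mb2 and sets
		 * MBX_INTERRUPT ... */

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else
			rval = QLA_FUNCTION_FAILED;
	}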
@@ -285,7 +314,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
285 314
286 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 315 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
287 rval = mb0 & MBS_MASK; 316 rval = mb0 & MBS_MASK;
288 fw->stack_ram[cnt] = mb2; 317 fw->stack_ram[cnt] = htons(mb2);
289 } else { 318 } else {
290 rval = QLA_FUNCTION_FAILED; 319 rval = QLA_FUNCTION_FAILED;
291 } 320 }
@@ -345,12 +374,15 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
345 374
346 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 375 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
347 rval = mb0 & MBS_MASK; 376 rval = mb0 & MBS_MASK;
348 fw->data_ram[cnt] = mb2; 377 fw->data_ram[cnt] = htons(mb2);
349 } else { 378 } else {
350 rval = QLA_FUNCTION_FAILED; 379 rval = QLA_FUNCTION_FAILED;
351 } 380 }
352 } 381 }
353 382
383 if (rval == QLA_SUCCESS)
384 qla2xxx_copy_queues(ha, &fw->data_ram[cnt]);
385
354 if (rval != QLA_SUCCESS) { 386 if (rval != QLA_SUCCESS) {
355 qla_printk(KERN_WARNING, ha, 387 qla_printk(KERN_WARNING, ha,
356 "Failed to dump firmware (%x)!!!\n", rval); 388 "Failed to dump firmware (%x)!!!\n", rval);
@@ -369,193 +401,6 @@ qla2300_fw_dump_failed:
369} 401}
370 402
371/** 403/**
372 * qla2300_ascii_fw_dump() - Converts a binary firmware dump to ASCII.
373 * @ha: HA context
374 */
375void
376qla2300_ascii_fw_dump(scsi_qla_host_t *ha)
377{
378 uint32_t cnt;
379 char *uiter;
380 char fw_info[30];
381 struct qla2300_fw_dump *fw;
382 uint32_t data_ram_cnt;
383
384 uiter = ha->fw_dump_buffer;
385 fw = ha->fw_dump;
386
387 qla_uprintf(&uiter, "%s Firmware Version %s\n", ha->model_number,
388 ha->isp_ops.fw_version_str(ha, fw_info));
389
390 qla_uprintf(&uiter, "\n[==>BEG]\n");
391
392 qla_uprintf(&uiter, "HCCR Register:\n%04x\n\n", fw->hccr);
393
394 qla_uprintf(&uiter, "PBIU Registers:");
395 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
396 if (cnt % 8 == 0) {
397 qla_uprintf(&uiter, "\n");
398 }
399 qla_uprintf(&uiter, "%04x ", fw->pbiu_reg[cnt]);
400 }
401
402 qla_uprintf(&uiter, "\n\nReqQ-RspQ-Risc2Host Status registers:");
403 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
404 if (cnt % 8 == 0) {
405 qla_uprintf(&uiter, "\n");
406 }
407 qla_uprintf(&uiter, "%04x ", fw->risc_host_reg[cnt]);
408 }
409
410 qla_uprintf(&uiter, "\n\nMailbox Registers:");
411 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
412 if (cnt % 8 == 0) {
413 qla_uprintf(&uiter, "\n");
414 }
415 qla_uprintf(&uiter, "%04x ", fw->mailbox_reg[cnt]);
416 }
417
418 qla_uprintf(&uiter, "\n\nAuto Request Response DMA Registers:");
419 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
420 if (cnt % 8 == 0) {
421 qla_uprintf(&uiter, "\n");
422 }
423 qla_uprintf(&uiter, "%04x ", fw->resp_dma_reg[cnt]);
424 }
425
426 qla_uprintf(&uiter, "\n\nDMA Registers:");
427 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
428 if (cnt % 8 == 0) {
429 qla_uprintf(&uiter, "\n");
430 }
431 qla_uprintf(&uiter, "%04x ", fw->dma_reg[cnt]);
432 }
433
434 qla_uprintf(&uiter, "\n\nRISC Hardware Registers:");
435 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
436 if (cnt % 8 == 0) {
437 qla_uprintf(&uiter, "\n");
438 }
439 qla_uprintf(&uiter, "%04x ", fw->risc_hdw_reg[cnt]);
440 }
441
442 qla_uprintf(&uiter, "\n\nRISC GP0 Registers:");
443 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
444 if (cnt % 8 == 0) {
445 qla_uprintf(&uiter, "\n");
446 }
447 qla_uprintf(&uiter, "%04x ", fw->risc_gp0_reg[cnt]);
448 }
449
450 qla_uprintf(&uiter, "\n\nRISC GP1 Registers:");
451 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
452 if (cnt % 8 == 0) {
453 qla_uprintf(&uiter, "\n");
454 }
455 qla_uprintf(&uiter, "%04x ", fw->risc_gp1_reg[cnt]);
456 }
457
458 qla_uprintf(&uiter, "\n\nRISC GP2 Registers:");
459 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
460 if (cnt % 8 == 0) {
461 qla_uprintf(&uiter, "\n");
462 }
463 qla_uprintf(&uiter, "%04x ", fw->risc_gp2_reg[cnt]);
464 }
465
466 qla_uprintf(&uiter, "\n\nRISC GP3 Registers:");
467 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
468 if (cnt % 8 == 0) {
469 qla_uprintf(&uiter, "\n");
470 }
471 qla_uprintf(&uiter, "%04x ", fw->risc_gp3_reg[cnt]);
472 }
473
474 qla_uprintf(&uiter, "\n\nRISC GP4 Registers:");
475 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
476 if (cnt % 8 == 0) {
477 qla_uprintf(&uiter, "\n");
478 }
479 qla_uprintf(&uiter, "%04x ", fw->risc_gp4_reg[cnt]);
480 }
481
482 qla_uprintf(&uiter, "\n\nRISC GP5 Registers:");
483 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
484 if (cnt % 8 == 0) {
485 qla_uprintf(&uiter, "\n");
486 }
487 qla_uprintf(&uiter, "%04x ", fw->risc_gp5_reg[cnt]);
488 }
489
490 qla_uprintf(&uiter, "\n\nRISC GP6 Registers:");
491 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
492 if (cnt % 8 == 0) {
493 qla_uprintf(&uiter, "\n");
494 }
495 qla_uprintf(&uiter, "%04x ", fw->risc_gp6_reg[cnt]);
496 }
497
498 qla_uprintf(&uiter, "\n\nRISC GP7 Registers:");
499 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
500 if (cnt % 8 == 0) {
501 qla_uprintf(&uiter, "\n");
502 }
503 qla_uprintf(&uiter, "%04x ", fw->risc_gp7_reg[cnt]);
504 }
505
506 qla_uprintf(&uiter, "\n\nFrame Buffer Hardware Registers:");
507 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
508 if (cnt % 8 == 0) {
509 qla_uprintf(&uiter, "\n");
510 }
511 qla_uprintf(&uiter, "%04x ", fw->frame_buf_hdw_reg[cnt]);
512 }
513
514 qla_uprintf(&uiter, "\n\nFPM B0 Registers:");
515 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
516 if (cnt % 8 == 0) {
517 qla_uprintf(&uiter, "\n");
518 }
519 qla_uprintf(&uiter, "%04x ", fw->fpm_b0_reg[cnt]);
520 }
521
522 qla_uprintf(&uiter, "\n\nFPM B1 Registers:");
523 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
524 if (cnt % 8 == 0) {
525 qla_uprintf(&uiter, "\n");
526 }
527 qla_uprintf(&uiter, "%04x ", fw->fpm_b1_reg[cnt]);
528 }
529
530 qla_uprintf(&uiter, "\n\nCode RAM Dump:");
531 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
532 if (cnt % 8 == 0) {
533 qla_uprintf(&uiter, "\n%04x: ", cnt + 0x0800);
534 }
535 qla_uprintf(&uiter, "%04x ", fw->risc_ram[cnt]);
536 }
537
538 qla_uprintf(&uiter, "\n\nStack RAM Dump:");
539 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
540 if (cnt % 8 == 0) {
541 qla_uprintf(&uiter, "\n%05x: ", cnt + 0x10000);
542 }
543 qla_uprintf(&uiter, "%04x ", fw->stack_ram[cnt]);
544 }
545
546 qla_uprintf(&uiter, "\n\nData RAM Dump:");
547 data_ram_cnt = ha->fw_memory_size - 0x11000 + 1;
548 for (cnt = 0; cnt < data_ram_cnt; cnt++) {
549 if (cnt % 8 == 0) {
550 qla_uprintf(&uiter, "\n%05x: ", cnt + 0x11000);
551 }
552 qla_uprintf(&uiter, "%04x ", fw->data_ram[cnt]);
553 }
554
555 qla_uprintf(&uiter, "\n\n[<==END] ISP Debug Dump.");
556}
557
558/**
559 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 404 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
560 * @ha: HA context 405 * @ha: HA context
561 * @hardware_locked: Called with the hardware_lock 406 * @hardware_locked: Called with the hardware_lock
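Note: the 193-line hunk above removes qla2300_ascii_fw_dump(), and a matching hunk below removes qla2100_ascii_fw_dump() and qla_uprintf(). Once every dump word is stored in a fixed byte order, the kernel can export the raw blob and leave pretty-printing to userspace, dropping hundreds of lines of formatting boilerplate per ISP family. A minimal userspace stand-in for the removed formatter (self-contained; the eight-words-per-line layout copies the removed code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Print an array of big-endian 16-bit words, eight per line,
 * the way qla2300_ascii_fw_dump() used to. */
static void
print_words(const char *title, const uint16_t *be_words, unsigned int count)
{
	unsigned int i;

	printf("%s:", title);
	for (i = 0; i < count; i++) {
		if (i % 8 == 0)
			printf("\n");
		printf("%04x ", ntohs(be_words[i]));
	}
	printf("\n");
}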
@@ -591,10 +436,11 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
591 "request...\n", ha->fw_dump); 436 "request...\n", ha->fw_dump);
592 goto qla2100_fw_dump_failed; 437 goto qla2100_fw_dump_failed;
593 } 438 }
594 fw = ha->fw_dump; 439 fw = &ha->fw_dump->isp.isp21;
440 qla2xxx_prep_dump(ha, ha->fw_dump);
595 441
596 rval = QLA_SUCCESS; 442 rval = QLA_SUCCESS;
597 fw->hccr = RD_REG_WORD(&reg->hccr); 443 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
598 444
599 /* Pause RISC. */ 445 /* Pause RISC. */
600 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 446 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
@@ -608,79 +454,81 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
608 if (rval == QLA_SUCCESS) { 454 if (rval == QLA_SUCCESS) {
609 dmp_reg = (uint16_t __iomem *)(reg + 0); 455 dmp_reg = (uint16_t __iomem *)(reg + 0);
610 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 456 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
611 fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++); 457 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
612 458
613 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10); 459 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
614 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 460 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
615 if (cnt == 8) { 461 if (cnt == 8) {
616 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xe0); 462 dmp_reg = (uint16_t __iomem *)
463 ((uint8_t __iomem *)reg + 0xe0);
617 } 464 }
618 fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++); 465 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
619 } 466 }
620 467
621 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20); 468 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20);
622 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 469 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
623 fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++); 470 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
624 471
625 WRT_REG_WORD(&reg->ctrl_status, 0x00); 472 WRT_REG_WORD(&reg->ctrl_status, 0x00);
626 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0); 473 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
627 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 474 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
628 fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 475 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
629 476
630 WRT_REG_WORD(&reg->pcr, 0x2000); 477 WRT_REG_WORD(&reg->pcr, 0x2000);
631 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 478 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
632 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++) 479 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
633 fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++); 480 fw->risc_gp0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
634 481
635 WRT_REG_WORD(&reg->pcr, 0x2100); 482 WRT_REG_WORD(&reg->pcr, 0x2100);
636 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 483 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
637 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++) 484 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
638 fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++); 485 fw->risc_gp1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
639 486
640 WRT_REG_WORD(&reg->pcr, 0x2200); 487 WRT_REG_WORD(&reg->pcr, 0x2200);
641 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 488 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
642 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++) 489 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
643 fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++); 490 fw->risc_gp2_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
644 491
645 WRT_REG_WORD(&reg->pcr, 0x2300); 492 WRT_REG_WORD(&reg->pcr, 0x2300);
646 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 493 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
647 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++) 494 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
648 fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++); 495 fw->risc_gp3_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
649 496
650 WRT_REG_WORD(&reg->pcr, 0x2400); 497 WRT_REG_WORD(&reg->pcr, 0x2400);
651 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 498 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
652 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++) 499 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
653 fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++); 500 fw->risc_gp4_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
654 501
655 WRT_REG_WORD(&reg->pcr, 0x2500); 502 WRT_REG_WORD(&reg->pcr, 0x2500);
656 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 503 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
657 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++) 504 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
658 fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++); 505 fw->risc_gp5_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
659 506
660 WRT_REG_WORD(&reg->pcr, 0x2600); 507 WRT_REG_WORD(&reg->pcr, 0x2600);
661 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 508 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
662 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++) 509 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
663 fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++); 510 fw->risc_gp6_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
664 511
665 WRT_REG_WORD(&reg->pcr, 0x2700); 512 WRT_REG_WORD(&reg->pcr, 0x2700);
666 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 513 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
667 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++) 514 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
668 fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++); 515 fw->risc_gp7_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
669 516
670 WRT_REG_WORD(&reg->ctrl_status, 0x10); 517 WRT_REG_WORD(&reg->ctrl_status, 0x10);
671 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 518 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
672 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++) 519 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
673 fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 520 fw->frame_buf_hdw_reg[cnt] =
521 htons(RD_REG_WORD(dmp_reg++));
674 522
675 WRT_REG_WORD(&reg->ctrl_status, 0x20); 523 WRT_REG_WORD(&reg->ctrl_status, 0x20);
676 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 524 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
677 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++) 525 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
678 fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++); 526 fw->fpm_b0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
679 527
680 WRT_REG_WORD(&reg->ctrl_status, 0x30); 528 WRT_REG_WORD(&reg->ctrl_status, 0x30);
681 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 529 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
682 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++) 530 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
683 fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++); 531 fw->fpm_b1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
684 532
685 /* Reset the ISP. */ 533 /* Reset the ISP. */
686 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 534 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
@@ -755,12 +603,15 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
755 603
756 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 604 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
757 rval = mb0 & MBS_MASK; 605 rval = mb0 & MBS_MASK;
758 fw->risc_ram[cnt] = mb2; 606 fw->risc_ram[cnt] = htons(mb2);
759 } else { 607 } else {
760 rval = QLA_FUNCTION_FAILED; 608 rval = QLA_FUNCTION_FAILED;
761 } 609 }
762 } 610 }
763 611
612 if (rval == QLA_SUCCESS)
613 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
614
764 if (rval != QLA_SUCCESS) { 615 if (rval != QLA_SUCCESS) {
765 qla_printk(KERN_WARNING, ha, 616 qla_printk(KERN_WARNING, ha,
766 "Failed to dump firmware (%x)!!!\n", rval); 617 "Failed to dump firmware (%x)!!!\n", rval);
@@ -778,179 +629,6 @@ qla2100_fw_dump_failed:
778 spin_unlock_irqrestore(&ha->hardware_lock, flags); 629 spin_unlock_irqrestore(&ha->hardware_lock, flags);
779} 630}
780 631
781/**
782 * qla2100_ascii_fw_dump() - Converts a binary firmware dump to ASCII.
783 * @ha: HA context
784 */
785void
786qla2100_ascii_fw_dump(scsi_qla_host_t *ha)
787{
788 uint32_t cnt;
789 char *uiter;
790 char fw_info[30];
791 struct qla2100_fw_dump *fw;
792
793 uiter = ha->fw_dump_buffer;
794 fw = ha->fw_dump;
795
796 qla_uprintf(&uiter, "%s Firmware Version %s\n", ha->model_number,
797 ha->isp_ops.fw_version_str(ha, fw_info));
798
799 qla_uprintf(&uiter, "\n[==>BEG]\n");
800
801 qla_uprintf(&uiter, "HCCR Register:\n%04x\n\n", fw->hccr);
802
803 qla_uprintf(&uiter, "PBIU Registers:");
804 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
805 if (cnt % 8 == 0) {
806 qla_uprintf(&uiter, "\n");
807 }
808 qla_uprintf(&uiter, "%04x ", fw->pbiu_reg[cnt]);
809 }
810
811 qla_uprintf(&uiter, "\n\nMailbox Registers:");
812 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
813 if (cnt % 8 == 0) {
814 qla_uprintf(&uiter, "\n");
815 }
816 qla_uprintf(&uiter, "%04x ", fw->mailbox_reg[cnt]);
817 }
818
819 qla_uprintf(&uiter, "\n\nDMA Registers:");
820 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
821 if (cnt % 8 == 0) {
822 qla_uprintf(&uiter, "\n");
823 }
824 qla_uprintf(&uiter, "%04x ", fw->dma_reg[cnt]);
825 }
826
827 qla_uprintf(&uiter, "\n\nRISC Hardware Registers:");
828 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
829 if (cnt % 8 == 0) {
830 qla_uprintf(&uiter, "\n");
831 }
832 qla_uprintf(&uiter, "%04x ", fw->risc_hdw_reg[cnt]);
833 }
834
835 qla_uprintf(&uiter, "\n\nRISC GP0 Registers:");
836 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
837 if (cnt % 8 == 0) {
838 qla_uprintf(&uiter, "\n");
839 }
840 qla_uprintf(&uiter, "%04x ", fw->risc_gp0_reg[cnt]);
841 }
842
843 qla_uprintf(&uiter, "\n\nRISC GP1 Registers:");
844 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
845 if (cnt % 8 == 0) {
846 qla_uprintf(&uiter, "\n");
847 }
848 qla_uprintf(&uiter, "%04x ", fw->risc_gp1_reg[cnt]);
849 }
850
851 qla_uprintf(&uiter, "\n\nRISC GP2 Registers:");
852 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
853 if (cnt % 8 == 0) {
854 qla_uprintf(&uiter, "\n");
855 }
856 qla_uprintf(&uiter, "%04x ", fw->risc_gp2_reg[cnt]);
857 }
858
859 qla_uprintf(&uiter, "\n\nRISC GP3 Registers:");
860 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
861 if (cnt % 8 == 0) {
862 qla_uprintf(&uiter, "\n");
863 }
864 qla_uprintf(&uiter, "%04x ", fw->risc_gp3_reg[cnt]);
865 }
866
867 qla_uprintf(&uiter, "\n\nRISC GP4 Registers:");
868 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
869 if (cnt % 8 == 0) {
870 qla_uprintf(&uiter, "\n");
871 }
872 qla_uprintf(&uiter, "%04x ", fw->risc_gp4_reg[cnt]);
873 }
874
875 qla_uprintf(&uiter, "\n\nRISC GP5 Registers:");
876 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
877 if (cnt % 8 == 0) {
878 qla_uprintf(&uiter, "\n");
879 }
880 qla_uprintf(&uiter, "%04x ", fw->risc_gp5_reg[cnt]);
881 }
882
883 qla_uprintf(&uiter, "\n\nRISC GP6 Registers:");
884 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
885 if (cnt % 8 == 0) {
886 qla_uprintf(&uiter, "\n");
887 }
888 qla_uprintf(&uiter, "%04x ", fw->risc_gp6_reg[cnt]);
889 }
890
891 qla_uprintf(&uiter, "\n\nRISC GP7 Registers:");
892 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
893 if (cnt % 8 == 0) {
894 qla_uprintf(&uiter, "\n");
895 }
896 qla_uprintf(&uiter, "%04x ", fw->risc_gp7_reg[cnt]);
897 }
898
899 qla_uprintf(&uiter, "\n\nFrame Buffer Hardware Registers:");
900 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
901 if (cnt % 8 == 0) {
902 qla_uprintf(&uiter, "\n");
903 }
904 qla_uprintf(&uiter, "%04x ", fw->frame_buf_hdw_reg[cnt]);
905 }
906
907 qla_uprintf(&uiter, "\n\nFPM B0 Registers:");
908 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
909 if (cnt % 8 == 0) {
910 qla_uprintf(&uiter, "\n");
911 }
912 qla_uprintf(&uiter, "%04x ", fw->fpm_b0_reg[cnt]);
913 }
914
915 qla_uprintf(&uiter, "\n\nFPM B1 Registers:");
916 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
917 if (cnt % 8 == 0) {
918 qla_uprintf(&uiter, "\n");
919 }
920 qla_uprintf(&uiter, "%04x ", fw->fpm_b1_reg[cnt]);
921 }
922
923 qla_uprintf(&uiter, "\n\nRISC SRAM:");
924 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
925 if (cnt % 8 == 0) {
926 qla_uprintf(&uiter, "\n%04x: ", cnt + 0x1000);
927 }
928 qla_uprintf(&uiter, "%04x ", fw->risc_ram[cnt]);
929 }
930
931 qla_uprintf(&uiter, "\n\n[<==END] ISP Debug Dump.");
932
933 return;
934}
935
936static int
937qla_uprintf(char **uiter, char *fmt, ...)
938{
939 int iter, len;
940 char buf[128];
941 va_list args;
942
943 va_start(args, fmt);
944 len = vsprintf(buf, fmt, args);
945 va_end(args);
946
947 for (iter = 0; iter < len; iter++, *uiter += 1)
948 *uiter[0] = buf[iter];
949
950 return (len);
951}
952
953
954void 632void
955qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 633qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
956{ 634{
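Note: the removed qla_uprintf() formatted into a fixed 128-byte stack buffer with vsprintf() and no length check, which was safe only while every format string stayed short. A bounded userspace variant of the same helper for comparison (in-kernel code would reach for vscnprintf() instead):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static int
uprintf(char **uiter, const char *fmt, ...)
{
	char buf[128];
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* vsnprintf() returns the would-be length; clamp if truncated. */
	if (len > (int)sizeof(buf) - 1)
		len = sizeof(buf) - 1;
	memcpy(*uiter, buf, len);
	*uiter += len;
	return len;
}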
@@ -967,6 +645,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
967 unsigned long flags; 645 unsigned long flags;
968 struct qla24xx_fw_dump *fw; 646 struct qla24xx_fw_dump *fw;
969 uint32_t ext_mem_cnt; 647 uint32_t ext_mem_cnt;
648 void *eft;
970 649
971 risc_address = ext_mem_cnt = 0; 650 risc_address = ext_mem_cnt = 0;
972 memset(mb, 0, sizeof(mb)); 651 memset(mb, 0, sizeof(mb));
@@ -987,10 +666,11 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
987 "request...\n", ha->fw_dump); 666 "request...\n", ha->fw_dump);
988 goto qla24xx_fw_dump_failed; 667 goto qla24xx_fw_dump_failed;
989 } 668 }
990 fw = ha->fw_dump; 669 fw = &ha->fw_dump->isp.isp24;
670 qla2xxx_prep_dump(ha, ha->fw_dump);
991 671
992 rval = QLA_SUCCESS; 672 rval = QLA_SUCCESS;
993 fw->host_status = RD_REG_DWORD(&reg->host_status); 673 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
994 674
995 /* Pause RISC. */ 675 /* Pause RISC. */
996 if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) { 676 if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) {
@@ -1012,7 +692,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1012 /* Host interface registers. */ 692 /* Host interface registers. */
1013 dmp_reg = (uint32_t __iomem *)(reg + 0); 693 dmp_reg = (uint32_t __iomem *)(reg + 0);
1014 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 694 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1015 fw->host_reg[cnt] = RD_REG_DWORD(dmp_reg++); 695 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1016 696
1017 /* Disable interrupts. */ 697 /* Disable interrupts. */
1018 WRT_REG_DWORD(&reg->ictrl, 0); 698 WRT_REG_DWORD(&reg->ictrl, 0);
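Note: the ISP24xx dump in the hunk below works in 32-bit units, so values are stored with htonl() rather than htons(). Its first step reads seven shadow registers by writing a selector dword at offset 0xF0 and reading the shadowed value back at 0xFC. A sketch of that access pattern as a helper (the register-block type and macros follow the surrounding code; the helper itself is invented here):

static uint32_t
qla24xx_read_shadow(struct device_reg_24xx __iomem *reg, int n)
{
	uint32_t __iomem *sel =
	    (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
	uint32_t __iomem *val =
	    (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);

	/* Selectors step 0xB0000000, 0xB0100000, ... 0xB0600000. */
	WRT_REG_DWORD(sel, 0xB0000000 | ((uint32_t)n << 20));
	return htonl(RD_REG_DWORD(val));
}

With it, fw->shadow_reg[n] = qla24xx_read_shadow(reg, n) replaces each unrolled select-and-read block.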
@@ -1024,470 +704,471 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1024 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 704 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1025 WRT_REG_DWORD(dmp_reg, 0xB0000000); 705 WRT_REG_DWORD(dmp_reg, 0xB0000000);
1026 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 706 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1027 fw->shadow_reg[0] = RD_REG_DWORD(dmp_reg); 707 fw->shadow_reg[0] = htonl(RD_REG_DWORD(dmp_reg));
1028 708
1029 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 709 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1030 WRT_REG_DWORD(dmp_reg, 0xB0100000); 710 WRT_REG_DWORD(dmp_reg, 0xB0100000);
1031 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 711 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1032 fw->shadow_reg[1] = RD_REG_DWORD(dmp_reg); 712 fw->shadow_reg[1] = htonl(RD_REG_DWORD(dmp_reg));
1033 713
1034 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 714 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1035 WRT_REG_DWORD(dmp_reg, 0xB0200000); 715 WRT_REG_DWORD(dmp_reg, 0xB0200000);
1036 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 716 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1037 fw->shadow_reg[2] = RD_REG_DWORD(dmp_reg); 717 fw->shadow_reg[2] = htonl(RD_REG_DWORD(dmp_reg));
1038 718
1039 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 719 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1040 WRT_REG_DWORD(dmp_reg, 0xB0300000); 720 WRT_REG_DWORD(dmp_reg, 0xB0300000);
1041 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 721 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1042 fw->shadow_reg[3] = RD_REG_DWORD(dmp_reg); 722 fw->shadow_reg[3] = htonl(RD_REG_DWORD(dmp_reg));
1043 723
1044 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 724 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1045 WRT_REG_DWORD(dmp_reg, 0xB0400000); 725 WRT_REG_DWORD(dmp_reg, 0xB0400000);
1046 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 726 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1047 fw->shadow_reg[4] = RD_REG_DWORD(dmp_reg); 727 fw->shadow_reg[4] = htonl(RD_REG_DWORD(dmp_reg));
1048 728
1049 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 729 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1050 WRT_REG_DWORD(dmp_reg, 0xB0500000); 730 WRT_REG_DWORD(dmp_reg, 0xB0500000);
1051 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 731 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1052 fw->shadow_reg[5] = RD_REG_DWORD(dmp_reg); 732 fw->shadow_reg[5] = htonl(RD_REG_DWORD(dmp_reg));
1053 733
1054 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 734 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1055 WRT_REG_DWORD(dmp_reg, 0xB0600000); 735 WRT_REG_DWORD(dmp_reg, 0xB0600000);
1056 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 736 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1057 fw->shadow_reg[6] = RD_REG_DWORD(dmp_reg); 737 fw->shadow_reg[6] = htonl(RD_REG_DWORD(dmp_reg));
1058 738
1059 /* Mailbox registers. */ 739 /* Mailbox registers. */
1060 mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 740 mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
1061 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 741 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1062 fw->mailbox_reg[cnt] = RD_REG_WORD(mbx_reg++); 742 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1063 743
1064 /* Transfer sequence registers. */ 744 /* Transfer sequence registers. */
1065 iter_reg = fw->xseq_gp_reg; 745 iter_reg = fw->xseq_gp_reg;
1066 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00); 746 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
1067 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 747 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1068 for (cnt = 0; cnt < 16; cnt++) 748 for (cnt = 0; cnt < 16; cnt++)
1069 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 749 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1070 750
1071 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10); 751 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
1072 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 752 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1073 for (cnt = 0; cnt < 16; cnt++) 753 for (cnt = 0; cnt < 16; cnt++)
1074 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 754 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1075 755
1076 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20); 756 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
1077 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 757 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1078 for (cnt = 0; cnt < 16; cnt++) 758 for (cnt = 0; cnt < 16; cnt++)
1079 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 759 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1080 760
1081 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30); 761 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
1082 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 762 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1083 for (cnt = 0; cnt < 16; cnt++) 763 for (cnt = 0; cnt < 16; cnt++)
1084 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 764 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1085 765
1086 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40); 766 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
1087 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 767 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1088 for (cnt = 0; cnt < 16; cnt++) 768 for (cnt = 0; cnt < 16; cnt++)
1089 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 769 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1090 770
1091 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50); 771 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
1092 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 772 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1093 for (cnt = 0; cnt < 16; cnt++) 773 for (cnt = 0; cnt < 16; cnt++)
1094 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 774 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1095 775
1096 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60); 776 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
1097 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 777 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1098 for (cnt = 0; cnt < 16; cnt++) 778 for (cnt = 0; cnt < 16; cnt++)
1099 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 779 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1100 780
1101 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70); 781 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
1102 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 782 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1103 for (cnt = 0; cnt < 16; cnt++) 783 for (cnt = 0; cnt < 16; cnt++)
1104 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 784 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1105 785
1106 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0); 786 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
1107 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 787 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1108 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++) 788 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++)
1109 fw->xseq_0_reg[cnt] = RD_REG_DWORD(dmp_reg++); 789 fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1110 790
1111 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0); 791 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
1112 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 792 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1113 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++) 793 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
1114 fw->xseq_1_reg[cnt] = RD_REG_DWORD(dmp_reg++); 794 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1115 795
1116 /* Receive sequence registers. */ 796 /* Receive sequence registers. */
1117 iter_reg = fw->rseq_gp_reg; 797 iter_reg = fw->rseq_gp_reg;
1118 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00); 798 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
1119 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 799 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1120 for (cnt = 0; cnt < 16; cnt++) 800 for (cnt = 0; cnt < 16; cnt++)
1121 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 801 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1122 802
1123 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10); 803 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
1124 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 804 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1125 for (cnt = 0; cnt < 16; cnt++) 805 for (cnt = 0; cnt < 16; cnt++)
1126 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 806 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1127 807
1128 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20); 808 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
1129 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 809 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1130 for (cnt = 0; cnt < 16; cnt++) 810 for (cnt = 0; cnt < 16; cnt++)
1131 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 811 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1132 812
1133 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30); 813 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
1134 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 814 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1135 for (cnt = 0; cnt < 16; cnt++) 815 for (cnt = 0; cnt < 16; cnt++)
1136 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 816 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1137 817
1138 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40); 818 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
1139 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 819 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1140 for (cnt = 0; cnt < 16; cnt++) 820 for (cnt = 0; cnt < 16; cnt++)
1141 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 821 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1142 822
1143 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50); 823 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
1144 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 824 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1145 for (cnt = 0; cnt < 16; cnt++) 825 for (cnt = 0; cnt < 16; cnt++)
1146 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 826 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1147 827
1148 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60); 828 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
1149 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 829 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1150 for (cnt = 0; cnt < 16; cnt++) 830 for (cnt = 0; cnt < 16; cnt++)
1151 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 831 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1152 832
1153 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70); 833 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
1154 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 834 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1155 for (cnt = 0; cnt < 16; cnt++) 835 for (cnt = 0; cnt < 16; cnt++)
1156 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 836 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1157 837
1158 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0); 838 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
1159 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 839 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1160 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++) 840 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++)
1161 fw->rseq_0_reg[cnt] = RD_REG_DWORD(dmp_reg++); 841 fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1162 842
1163 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0); 843 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
1164 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 844 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1165 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++) 845 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
1166 fw->rseq_1_reg[cnt] = RD_REG_DWORD(dmp_reg++); 846 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1167 847
1168 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0); 848 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
1169 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 849 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1170 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++) 850 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
1171 fw->rseq_2_reg[cnt] = RD_REG_DWORD(dmp_reg++); 851 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1172 852
1173 /* Command DMA registers. */ 853 /* Command DMA registers. */
1174 WRT_REG_DWORD(&reg->iobase_addr, 0x7100); 854 WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
1175 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 855 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1176 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++) 856 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
1177 fw->cmd_dma_reg[cnt] = RD_REG_DWORD(dmp_reg++); 857 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1178 858
1179 /* Queues. */ 859 /* Queues. */
1180 iter_reg = fw->req0_dma_reg; 860 iter_reg = fw->req0_dma_reg;
1181 WRT_REG_DWORD(&reg->iobase_addr, 0x7200); 861 WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
1182 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 862 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1183 for (cnt = 0; cnt < 8; cnt++) 863 for (cnt = 0; cnt < 8; cnt++)
1184 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 864 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1185 865
1186 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 866 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
1187 for (cnt = 0; cnt < 7; cnt++) 867 for (cnt = 0; cnt < 7; cnt++)
1188 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 868 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1189 869
1190 iter_reg = fw->resp0_dma_reg; 870 iter_reg = fw->resp0_dma_reg;
1191 WRT_REG_DWORD(&reg->iobase_addr, 0x7300); 871 WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
1192 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 872 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1193 for (cnt = 0; cnt < 8; cnt++) 873 for (cnt = 0; cnt < 8; cnt++)
1194 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 874 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1195 875
1196 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 876 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
1197 for (cnt = 0; cnt < 7; cnt++) 877 for (cnt = 0; cnt < 7; cnt++)
1198 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 878 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1199 879
1200 iter_reg = fw->req1_dma_reg; 880 iter_reg = fw->req1_dma_reg;
1201 WRT_REG_DWORD(&reg->iobase_addr, 0x7400); 881 WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
1202 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 882 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1203 for (cnt = 0; cnt < 8; cnt++) 883 for (cnt = 0; cnt < 8; cnt++)
1204 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 884 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1205 885
1206 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 886 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
1207 for (cnt = 0; cnt < 7; cnt++) 887 for (cnt = 0; cnt < 7; cnt++)
1208 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 888 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1209 889
1210 /* Transmit DMA registers. */ 890 /* Transmit DMA registers. */
1211 iter_reg = fw->xmt0_dma_reg; 891 iter_reg = fw->xmt0_dma_reg;
1212 WRT_REG_DWORD(&reg->iobase_addr, 0x7600); 892 WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
1213 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 893 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1214 for (cnt = 0; cnt < 16; cnt++) 894 for (cnt = 0; cnt < 16; cnt++)
1215 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 895 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1216 896
1217 WRT_REG_DWORD(&reg->iobase_addr, 0x7610); 897 WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
1218 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 898 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1219 for (cnt = 0; cnt < 16; cnt++) 899 for (cnt = 0; cnt < 16; cnt++)
1220 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 900 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1221 901
1222 iter_reg = fw->xmt1_dma_reg; 902 iter_reg = fw->xmt1_dma_reg;
1223 WRT_REG_DWORD(&reg->iobase_addr, 0x7620); 903 WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
1224 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 904 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1225 for (cnt = 0; cnt < 16; cnt++) 905 for (cnt = 0; cnt < 16; cnt++)
1226 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 906 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1227 907
1228 WRT_REG_DWORD(&reg->iobase_addr, 0x7630); 908 WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
1229 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 909 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1230 for (cnt = 0; cnt < 16; cnt++) 910 for (cnt = 0; cnt < 16; cnt++)
1231 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 911 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1232 912
1233 iter_reg = fw->xmt2_dma_reg; 913 iter_reg = fw->xmt2_dma_reg;
1234 WRT_REG_DWORD(&reg->iobase_addr, 0x7640); 914 WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
1235 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 915 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1236 for (cnt = 0; cnt < 16; cnt++) 916 for (cnt = 0; cnt < 16; cnt++)
1237 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 917 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1238 918
1239 WRT_REG_DWORD(&reg->iobase_addr, 0x7650); 919 WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
1240 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 920 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1241 for (cnt = 0; cnt < 16; cnt++) 921 for (cnt = 0; cnt < 16; cnt++)
1242 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 922 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1243 923
1244 iter_reg = fw->xmt3_dma_reg; 924 iter_reg = fw->xmt3_dma_reg;
1245 WRT_REG_DWORD(&reg->iobase_addr, 0x7660); 925 WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
1246 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 926 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1247 for (cnt = 0; cnt < 16; cnt++) 927 for (cnt = 0; cnt < 16; cnt++)
1248 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 928 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1249 929
1250 WRT_REG_DWORD(&reg->iobase_addr, 0x7670); 930 WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
1251 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 931 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1252 for (cnt = 0; cnt < 16; cnt++) 932 for (cnt = 0; cnt < 16; cnt++)
1253 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 933 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1254 934
1255 iter_reg = fw->xmt4_dma_reg; 935 iter_reg = fw->xmt4_dma_reg;
1256 WRT_REG_DWORD(&reg->iobase_addr, 0x7680); 936 WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
1257 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 937 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1258 for (cnt = 0; cnt < 16; cnt++) 938 for (cnt = 0; cnt < 16; cnt++)
1259 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 939 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1260 940
1261 WRT_REG_DWORD(&reg->iobase_addr, 0x7690); 941 WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
1262 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 942 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1263 for (cnt = 0; cnt < 16; cnt++) 943 for (cnt = 0; cnt < 16; cnt++)
1264 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 944 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1265 945
1266 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0); 946 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
1267 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 947 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1268 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++) 948 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
1269 fw->xmt_data_dma_reg[cnt] = RD_REG_DWORD(dmp_reg++); 949 fw->xmt_data_dma_reg[cnt] =
950 htonl(RD_REG_DWORD(dmp_reg++));
1270 951
1271 /* Receive DMA registers. */ 952 /* Receive DMA registers. */
1272 iter_reg = fw->rcvt0_data_dma_reg; 953 iter_reg = fw->rcvt0_data_dma_reg;
1273 WRT_REG_DWORD(&reg->iobase_addr, 0x7700); 954 WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
1274 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 955 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1275 for (cnt = 0; cnt < 16; cnt++) 956 for (cnt = 0; cnt < 16; cnt++)
1276 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 957 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1277 958
1278 WRT_REG_DWORD(&reg->iobase_addr, 0x7710); 959 WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
1279 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 960 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1280 for (cnt = 0; cnt < 16; cnt++) 961 for (cnt = 0; cnt < 16; cnt++)
1281 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 962 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1282 963
1283 iter_reg = fw->rcvt1_data_dma_reg; 964 iter_reg = fw->rcvt1_data_dma_reg;
1284 WRT_REG_DWORD(&reg->iobase_addr, 0x7720); 965 WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
1285 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 966 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1286 for (cnt = 0; cnt < 16; cnt++) 967 for (cnt = 0; cnt < 16; cnt++)
1287 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1288 969
1289 WRT_REG_DWORD(&reg->iobase_addr, 0x7730); 970 WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
1290 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 971 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1291 for (cnt = 0; cnt < 16; cnt++) 972 for (cnt = 0; cnt < 16; cnt++)
1292 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1293 974
1294 /* RISC registers. */ 975 /* RISC registers. */
1295 iter_reg = fw->risc_gp_reg; 976 iter_reg = fw->risc_gp_reg;
1296 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00); 977 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
1297 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 978 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1298 for (cnt = 0; cnt < 16; cnt++) 979 for (cnt = 0; cnt < 16; cnt++)
1299 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 980 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1300 981
1301 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10); 982 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
1302 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 983 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1303 for (cnt = 0; cnt < 16; cnt++) 984 for (cnt = 0; cnt < 16; cnt++)
1304 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 985 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1305 986
1306 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20); 987 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
1307 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 988 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1308 for (cnt = 0; cnt < 16; cnt++) 989 for (cnt = 0; cnt < 16; cnt++)
1309 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 990 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1310 991
 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	/* Local memory controller registers. */
 	iter_reg = fw->lmc_reg;
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	/* Fibre Protocol Module registers. */
 	iter_reg = fw->fpm_hdw_reg;
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	/* Frame Buffer registers. */
 	iter_reg = fw->fb_hdw_reg;
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
 	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
 	for (cnt = 0; cnt < 16; cnt++)
-		*iter_reg++ = RD_REG_DWORD(dmp_reg++);
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
 
 	/* Reset RISC. */
 	WRT_REG_DWORD(&reg->ctrl_status,
@@ -1577,7 +1258,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 
 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
 			rval = mb[0] & MBS_MASK;
-			fw->code_ram[cnt] = (mb[3] << 16) | mb[2];
+			fw->code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
 		} else {
 			rval = QLA_FUNCTION_FAILED;
 		}
@@ -1627,12 +1308,18 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 
 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
 			rval = mb[0] & MBS_MASK;
-			fw->ext_mem[cnt] = (mb[3] << 16) | mb[2];
+			fw->ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
 		} else {
 			rval = QLA_FUNCTION_FAILED;
 		}
 	}
 
+	if (rval == QLA_SUCCESS) {
+		eft = qla2xxx_copy_queues(ha, &fw->ext_mem[cnt]);
+		if (ha->eft)
+			memcpy(eft, ha->eft, ntohl(ha->fw_dump->eft_size));
+	}
+
 	if (rval != QLA_SUCCESS) {
 		qla_printk(KERN_WARNING, ha,
 		    "Failed to dump firmware (%x)!!!\n", rval);
@@ -1650,252 +1337,6 @@ qla24xx_fw_dump_failed:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-void
-qla24xx_ascii_fw_dump(scsi_qla_host_t *ha)
-{
-	uint32_t cnt;
-	char *uiter;
-	struct qla24xx_fw_dump *fw;
-	uint32_t ext_mem_cnt;
-
-	uiter = ha->fw_dump_buffer;
-	fw = ha->fw_dump;
-
-	qla_uprintf(&uiter, "ISP FW Version %d.%02d.%02d Attributes %04x\n",
-	    ha->fw_major_version, ha->fw_minor_version,
-	    ha->fw_subminor_version, ha->fw_attributes);
-
-	qla_uprintf(&uiter, "\nR2H Status Register\n%04x\n", fw->host_status);
-
-	qla_uprintf(&uiter, "\nHost Interface Registers");
-	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->host_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nShadow Registers");
-	for (cnt = 0; cnt < sizeof(fw->shadow_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->shadow_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nMailbox Registers");
-	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->mailbox_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXSEQ GP Registers");
-	for (cnt = 0; cnt < sizeof(fw->xseq_gp_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xseq_gp_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXSEQ-0 Registers");
-	for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xseq_0_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXSEQ-1 Registers");
-	for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xseq_1_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRSEQ GP Registers");
-	for (cnt = 0; cnt < sizeof(fw->rseq_gp_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->rseq_gp_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRSEQ-0 Registers");
-	for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->rseq_0_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRSEQ-1 Registers");
-	for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->rseq_1_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRSEQ-2 Registers");
-	for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->rseq_2_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nCommand DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->cmd_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRequest0 Queue DMA Channel Registers");
-	for (cnt = 0; cnt < sizeof(fw->req0_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->req0_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nResponse0 Queue DMA Channel Registers");
-	for (cnt = 0; cnt < sizeof(fw->resp0_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->resp0_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRequest1 Queue DMA Channel Registers");
-	for (cnt = 0; cnt < sizeof(fw->req1_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->req1_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXMT0 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->xmt0_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xmt0_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXMT1 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->xmt1_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xmt1_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXMT2 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->xmt2_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xmt2_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXMT3 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->xmt3_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xmt3_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXMT4 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->xmt4_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xmt4_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nXMT Data DMA Common Registers");
-	for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->xmt_data_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRCV Thread 0 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->rcvt0_data_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRCV Thread 1 Data DMA Registers");
-	for (cnt = 0; cnt < sizeof(fw->rcvt1_data_dma_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nRISC GP Registers");
-	for (cnt = 0; cnt < sizeof(fw->risc_gp_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->risc_gp_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nLMC Registers");
-	for (cnt = 0; cnt < sizeof(fw->lmc_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->lmc_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nFPM Hardware Registers");
-	for (cnt = 0; cnt < sizeof(fw->fpm_hdw_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->fpm_hdw_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nFB Hardware Registers");
-	for (cnt = 0; cnt < sizeof(fw->fb_hdw_reg) / 4; cnt++) {
-		if (cnt % 8 == 0)
-			qla_uprintf(&uiter, "\n");
-
-		qla_uprintf(&uiter, "%08x ", fw->fb_hdw_reg[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nCode RAM");
-	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
-		if (cnt % 8 == 0) {
-			qla_uprintf(&uiter, "\n%08x: ", cnt + 0x20000);
-		}
-		qla_uprintf(&uiter, "%08x ", fw->code_ram[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n\nExternal Memory");
-	ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
-	for (cnt = 0; cnt < ext_mem_cnt; cnt++) {
-		if (cnt % 8 == 0) {
-			qla_uprintf(&uiter, "\n%08x: ", cnt + 0x100000);
-		}
-		qla_uprintf(&uiter, "%08x ", fw->ext_mem[cnt]);
-	}
-
-	qla_uprintf(&uiter, "\n[<==END] ISP Debug Dump");
-}
-
-
 /****************************************************************************/
 /* Driver Debug Functions. */
 /****************************************************************************/
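
Every hunk above applies the same conversion: qla24xx_fw_dump() selects a 16-dword register window by writing the window base to iobase_addr, reads the window back through the alias at byte offset 0xC0, and now wraps each read in htonl() so the captured words land big-endian in the buffer. Together with the htonl()/ntohl() treatment of code_ram, ext_mem, and eft_size, the saved binary dump becomes independent of host byte order. A minimal sketch of the repeated idiom, factored as a hypothetical helper (the patch itself open-codes this loop per window):

static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t *buf)
{
	int cnt;
	uint32_t __iomem *dmp_reg;

	/* Select the 16-dword window, then read it back at offset 0xC0,
	 * storing each word big-endian. */
	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
	for (cnt = 0; cnt < 16; cnt++)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));

	return buf;
}
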
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index ab6afeaa2f2c..533425338e05 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -37,134 +37,86 @@
 /*
  * Macros use for debugging the driver.
  */
-#undef ENTER_TRACE
-#if defined(ENTER_TRACE)
-#define ENTER(x)	do { printk("qla2100 : Entering %s()\n", x); } while (0)
-#define LEAVE(x)	do { printk("qla2100 : Leaving %s()\n", x); } while (0)
-#define ENTER_INTR(x)	do { printk("qla2100 : Entering %s()\n", x); } while (0)
-#define LEAVE_INTR(x)	do { printk("qla2100 : Leaving %s()\n", x); } while (0)
-#else
-#define ENTER(x)	do {} while (0)
-#define LEAVE(x)	do {} while (0)
-#define ENTER_INTR(x)	do {} while (0)
-#define LEAVE_INTR(x)	do {} while (0)
-#endif
 
-#if DEBUG_QLA2100
-#define DEBUG(x)	do {x;} while (0);
-#else
-#define DEBUG(x)	do {} while (0);
-#endif
+#define DEBUG(x)	do { if (extended_error_logging) { x; } } while (0)
 
 #if defined(QL_DEBUG_LEVEL_1)
-#define DEBUG1(x)	do {x;} while (0);
+#define DEBUG1(x)	do {x;} while (0)
 #else
-#define DEBUG1(x)	do {} while (0);
+#define DEBUG1(x)	do {} while (0)
 #endif
 
-#if defined(QL_DEBUG_LEVEL_2)
-#define DEBUG2(x)	do {x;} while (0);
-#define DEBUG2_3(x)	do {x;} while (0);
-#define DEBUG2_3_11(x)	do {x;} while (0);
-#define DEBUG2_9_10(x)	do {x;} while (0);
-#define DEBUG2_11(x)	do {x;} while (0);
-#define DEBUG2_13(x)	do {x;} while (0);
-#else
-#define DEBUG2(x)	do {} while (0);
-#endif
+#define DEBUG2(x)	do { if (extended_error_logging) { x; } } while (0)
+#define DEBUG2_3(x)	do { if (extended_error_logging) { x; } } while (0)
+#define DEBUG2_3_11(x)	do { if (extended_error_logging) { x; } } while (0)
+#define DEBUG2_9_10(x)	do { if (extended_error_logging) { x; } } while (0)
+#define DEBUG2_11(x)	do { if (extended_error_logging) { x; } } while (0)
+#define DEBUG2_13(x)	do { if (extended_error_logging) { x; } } while (0)
 
 #if defined(QL_DEBUG_LEVEL_3)
-#define DEBUG3(x)	do {x;} while (0);
-#define DEBUG2_3(x)	do {x;} while (0);
-#define DEBUG2_3_11(x)	do {x;} while (0);
-#define DEBUG3_11(x)	do {x;} while (0);
+#define DEBUG3(x)	do {x;} while (0)
+#define DEBUG3_11(x)	do {x;} while (0)
 #else
-#define DEBUG3(x)	do {} while (0);
- #if !defined(QL_DEBUG_LEVEL_2)
-  #define DEBUG2_3(x)	do {} while (0);
- #endif
+#define DEBUG3(x)	do {} while (0)
 #endif
 
 #if defined(QL_DEBUG_LEVEL_4)
-#define DEBUG4(x)	do {x;} while (0);
+#define DEBUG4(x)	do {x;} while (0)
 #else
-#define DEBUG4(x)	do {} while (0);
+#define DEBUG4(x)	do {} while (0)
 #endif
 
 #if defined(QL_DEBUG_LEVEL_5)
-#define DEBUG5(x)	do {x;} while (0);
+#define DEBUG5(x)	do {x;} while (0)
 #else
-#define DEBUG5(x)	do {} while (0);
+#define DEBUG5(x)	do {} while (0)
 #endif
 
 #if defined(QL_DEBUG_LEVEL_7)
-#define DEBUG7(x)	do {x;} while (0);
+#define DEBUG7(x)	do {x;} while (0)
 #else
-#define DEBUG7(x)	do {} while (0);
+#define DEBUG7(x)	do {} while (0)
 #endif
 
 #if defined(QL_DEBUG_LEVEL_9)
-#define DEBUG9(x)	do {x;} while (0);
-#define DEBUG9_10(x)	do {x;} while (0);
-#define DEBUG2_9_10(x)	do {x;} while (0);
+#define DEBUG9(x)	do {x;} while (0)
+#define DEBUG9_10(x)	do {x;} while (0)
 #else
-#define DEBUG9(x)	do {} while (0);
+#define DEBUG9(x)	do {} while (0)
 #endif
 
 #if defined(QL_DEBUG_LEVEL_10)
-#define DEBUG10(x)	do {x;} while (0);
-#define DEBUG2_9_10(x)	do {x;} while (0);
-#define DEBUG9_10(x)	do {x;} while (0);
+#define DEBUG10(x)	do {x;} while (0)
+#define DEBUG9_10(x)	do {x;} while (0)
 #else
-#define DEBUG10(x)	do {} while (0);
- #if !defined(DEBUG2_9_10)
- #define DEBUG2_9_10(x)	do {} while (0);
- #endif
+#define DEBUG10(x)	do {} while (0)
  #if !defined(DEBUG9_10)
- #define DEBUG9_10(x)	do {} while (0);
+ #define DEBUG9_10(x)	do {} while (0)
  #endif
 #endif
 
 #if defined(QL_DEBUG_LEVEL_11)
-#define DEBUG11(x)	do{x;} while(0);
-#if !defined(DEBUG2_11)
-#define DEBUG2_11(x)	do{x;} while(0);
-#endif
-#if !defined(DEBUG2_3_11)
-#define DEBUG2_3_11(x)	do{x;} while(0);
-#endif
+#define DEBUG11(x)	do{x;} while(0)
 #if !defined(DEBUG3_11)
-#define DEBUG3_11(x)	do{x;} while(0);
+#define DEBUG3_11(x)	do{x;} while(0)
 #endif
 #else
-#define DEBUG11(x)	do{} while(0);
- #if !defined(QL_DEBUG_LEVEL_2)
- #define DEBUG2_11(x)	do{} while(0);
-  #if !defined(QL_DEBUG_LEVEL_3)
-  #define DEBUG2_3_11(x)	do{} while(0);
-  #endif
- #endif
+#define DEBUG11(x)	do{} while(0)
  #if !defined(QL_DEBUG_LEVEL_3)
- #define DEBUG3_11(x)	do{} while(0);
+ #define DEBUG3_11(x)	do{} while(0)
  #endif
 #endif
 
 #if defined(QL_DEBUG_LEVEL_12)
-#define DEBUG12(x)	do {x;} while (0);
+#define DEBUG12(x)	do {x;} while (0)
 #else
-#define DEBUG12(x)	do {} while (0);
+#define DEBUG12(x)	do {} while (0)
 #endif
 
 #if defined(QL_DEBUG_LEVEL_13)
 #define DEBUG13(x)	do {x;} while (0)
-#if !defined(DEBUG2_13)
-#define DEBUG2_13(x)	do {x;} while(0)
-#endif
 #else
 #define DEBUG13(x)	do {} while (0)
-#if !defined(QL_DEBUG_LEVEL_2)
-#define DEBUG2_13(x)	do {} while(0)
-#endif
 #endif
 
 #if defined(QL_DEBUG_LEVEL_14)
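
With this rework, DEBUG() and the whole DEBUG2* family are always compiled in and gate at runtime on the extended_error_logging module parameter, instead of vanishing unless QL_DEBUG_LEVEL_2 was set at build time; dropping the stray trailing semicolons from the expansions also lets the macros sit safely in unbraced if/else bodies. For example,

	DEBUG2(printk(KERN_INFO "scsi(%ld): fw_state=%x.\n", ha->host_no, fw_state));

now expands to roughly

	do { if (extended_error_logging) { printk(/* as above */); } } while (0);

so a disabled trace statement costs one predictable branch rather than disappearing entirely.
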
@@ -176,9 +128,6 @@
 /*
  * Firmware Dump structure definition
  */
-#define FW_DUMP_SIZE_128K	0xBC000
-#define FW_DUMP_SIZE_512K	0x2FC000
-#define FW_DUMP_SIZE_1M		0x5FC000
 
 struct qla2300_fw_dump {
 	uint16_t hccr;
@@ -224,8 +173,6 @@ struct qla2100_fw_dump {
 	uint16_t risc_ram[0xf000];
 };
 
-#define FW_DUMP_SIZE_24XX	0x2B0000
-
 struct qla24xx_fw_dump {
 	uint32_t host_status;
 	uint32_t host_reg[32];
@@ -257,3 +204,39 @@ struct qla24xx_fw_dump {
 	uint32_t code_ram[0x2000];
 	uint32_t ext_mem[1];
 };
+
+#define EFT_NUM_BUFFERS		4
+#define EFT_BYTES_PER_BUFFER	0x4000
+#define EFT_SIZE		((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
+
+struct qla2xxx_fw_dump {
+	uint8_t signature[4];
+	uint32_t version;
+
+	uint32_t fw_major_version;
+	uint32_t fw_minor_version;
+	uint32_t fw_subminor_version;
+	uint32_t fw_attributes;
+
+	uint32_t vendor;
+	uint32_t device;
+	uint32_t subsystem_vendor;
+	uint32_t subsystem_device;
+
+	uint32_t fixed_size;
+	uint32_t mem_size;
+	uint32_t req_q_size;
+	uint32_t rsp_q_size;
+
+	uint32_t eft_size;
+	uint32_t eft_addr_l;
+	uint32_t eft_addr_h;
+
+	uint32_t header_size;
+
+	union {
+		struct qla2100_fw_dump isp21;
+		struct qla2300_fw_dump isp23;
+		struct qla24xx_fw_dump isp24;
+	} isp;
+};
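
The new qla2xxx_fw_dump header makes the binary dump self-describing: a 'QLGC' signature, a version, the firmware identity, and the byte count of each variable-length region, all stored with htonl(). That is what allows the in-kernel ASCII formatting removed from qla_dbg.c to move to offline tooling, which only needs to walk this header. A userspace sketch (field layout assumed to mirror the struct above, which has no padding):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_hdr {		/* mirrors struct qla2xxx_fw_dump, sans isp */
	uint8_t  signature[4];
	uint32_t version;
	uint32_t fw_major, fw_minor, fw_subminor, fw_attributes;
	uint32_t vendor, device, subsystem_vendor, subsystem_device;
	uint32_t fixed_size, mem_size, req_q_size, rsp_q_size;
	uint32_t eft_size, eft_addr_l, eft_addr_h;
	uint32_t header_size;
};

static int parse_hdr(const unsigned char *blob)
{
	struct dump_hdr hdr;

	memcpy(&hdr, blob, sizeof(hdr));
	if (memcmp(hdr.signature, "QLGC", 4) != 0)
		return -1;	/* not a qla2xxx firmware dump */
	printf("dump v%u: fw %u.%02u.%02u, fixed %u, mem %u, eft %u bytes\n",
	    ntohl(hdr.version), ntohl(hdr.fw_major), ntohl(hdr.fw_minor),
	    ntohl(hdr.fw_subminor), ntohl(hdr.fixed_size),
	    ntohl(hdr.mem_size), ntohl(hdr.eft_size));
	return 0;
}
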
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index d6f6579cfd27..0930260aec2c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -487,6 +487,7 @@ typedef struct {
 #define MBA_IP_RCV_BUFFER_EMPTY 0x8026	/* IP receive buffer queue empty. */
 #define MBA_IP_HDR_DATA_SPLIT	0x8027	/* IP header/data splitting feature */
 					/* used. */
+#define MBA_TRACE_NOTIFICATION	0x8028	/* Trace/Diagnostic notification. */
 #define MBA_POINT_TO_POINT	0x8030	/* Point to point mode. */
 #define MBA_CMPLT_1_16BIT	0x8031	/* Completion 1 16bit IOSB. */
 #define MBA_CMPLT_2_16BIT	0x8032	/* Completion 2 16bit IOSB. */
@@ -608,7 +609,9 @@ typedef struct {
 #define MBC_SERDES_PARAMS	0x10	/* Serdes Tx Parameters. */
 #define MBC_GET_IOCB_STATUS	0x12	/* Get IOCB status command. */
 #define MBC_GET_TIMEOUT_PARAMS	0x22	/* Get FW timeouts. */
+#define MBC_TRACE_CONTROL	0x27	/* Trace control command. */
 #define MBC_GEN_SYSTEM_ERROR	0x2a	/* Generate System Error. */
+#define MBC_READ_SFP		0x31	/* Read SFP Data. */
 #define MBC_SET_TIMEOUT_PARAMS	0x32	/* Set FW timeouts. */
 #define MBC_MID_INITIALIZE_FIRMWARE	0x48	/* MID Initialize firmware. */
 #define MBC_MID_GET_VP_DATABASE	0x49	/* MID Get VP Database. */
@@ -618,6 +621,9 @@ typedef struct {
 #define MBC_GET_LINK_PRIV_STATS	0x6d	/* Get link & private data. */
 #define MBC_SET_VENDOR_ID	0x76	/* Set Vendor ID. */
 
+#define TC_ENABLE	4
+#define TC_DISABLE	5
+
 /* Firmware return data sizes */
 #define FCAL_MAP_SIZE	128
 
@@ -1997,7 +2003,6 @@ struct isp_operations {
 	    uint32_t);
 
 	void (*fw_dump) (struct scsi_qla_host *, int);
-	void (*ascii_fw_dump) (struct scsi_qla_host *);
 
 	int (*beacon_on) (struct scsi_qla_host *);
 	int (*beacon_off) (struct scsi_qla_host *);
@@ -2041,6 +2046,7 @@ typedef struct scsi_qla_host {
 		uint32_t	enable_led_scheme	:1;
 		uint32_t	msi_enabled		:1;
 		uint32_t	msix_enabled		:1;
+		uint32_t	disable_serdes		:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -2134,7 +2140,7 @@ typedef struct scsi_qla_host {
 	mempool_t	*srb_mempool;
 
 	/* This spinlock is used to protect "io transactions", you must
-	 * aquire it before doing any IO to the card, eg with RD_REG*() and
+	 * acquire it before doing any IO to the card, eg with RD_REG*() and
 	 * WRT_REG*() for the duration of your entire commandtransaction.
 	 *
 	 * This spinlock is of lower priority than the io request lock.
@@ -2238,6 +2244,11 @@ typedef struct scsi_qla_host {
 	struct sns_cmd_pkt	*sns_cmd;
 	dma_addr_t		sns_cmd_dma;
 
+#define SFP_DEV_SIZE	256
+#define SFP_BLOCK_SIZE	64
+	void		*sfp_data;
+	dma_addr_t	sfp_data_dma;
+
 	struct task_struct	*dpc_thread;
 	uint8_t dpc_active;		/* DPC routine is active */
 
@@ -2303,11 +2314,12 @@ typedef struct scsi_qla_host {
 	uint16_t	fw_seriallink_options24[4];
 
 	/* Firmware dump information. */
-	void		*fw_dump;
+	struct qla2xxx_fw_dump *fw_dump;
+	uint32_t	fw_dump_len;
 	int		fw_dumped;
 	int		fw_dump_reading;
-	char		*fw_dump_buffer;
-	int		fw_dump_buffer_len;
+	dma_addr_t	eft_dma;
+	void		*eft;
 
 	uint8_t		host_str[16];
 	uint32_t	pci_attr;
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h
index a8fc0ffc7fc5..dd435410dfa2 100644
--- a/drivers/scsi/qla2xxx/qla_devtbl.h
+++ b/drivers/scsi/qla2xxx/qla_devtbl.h
@@ -1,4 +1,4 @@
-#define QLA_MODEL_NAMES		0x4A
+#define QLA_MODEL_NAMES		0x57
 
 /*
  * Adapter model names and descriptions.
@@ -76,6 +76,19 @@ static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = {
 	"QLE2440",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x145 */
 	"QLE2464",	"PCI-Express to 4Gb FC, Quad Channel",		/* 0x146 */
 	"QLA2440",	"PCI-X 2.0 to 4Gb FC, Single Channel",		/* 0x147 */
-	" ",		" ",						/* 0x148 */
+	"HP AE369A",	"PCI-X 2.0 to 4Gb FC, Dual Channel",		/* 0x148 */
 	"QLA2340",	"Sun 133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x149 */
+	" ",		" ",						/* 0x14a */
+	" ",		" ",						/* 0x14b */
+	"QMC2432M",	"IBM eServer BC 4Gb FC Expansion Card CFFE",	/* 0x14c */
+	"QMC2422M",	"IBM eServer BC 4Gb FC Expansion Card CFFX",	/* 0x14d */
+	"QLE220",	"Sun PCI-Express to 4Gb FC, Single Channel",	/* 0x14e */
+	" ",		" ",						/* 0x14f */
+	" ",		" ",						/* 0x150 */
+	" ",		" ",						/* 0x151 */
+	"QME2462",	"PCI-Express to 4Gb FC, Dual Channel Mezz HBA",	/* 0x152 */
+	"QMH2462",	"PCI-Express to 4Gb FC, Dual Channel Mezz HBA",	/* 0x153 */
+	" ",		" ",						/* 0x154 */
+	"QLE220",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x155 */
+	"QLE220",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x156 */
 };
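
The table stores a name/description string pair per board ID, which is why QLA_MODEL_NAMES grows from 0x4A to 0x57: the last populated slot is /* 0x156 */, and 0x156 - 0x100 + 1 = 0x57 entries. The lookup used elsewhere in the driver is, roughly (sketch; the exact index derivation from the board ID is an assumption here):

	/* board ID 0x1XX maps to a string pair at index (board_id - 0x100) */
	if (index < QLA_MODEL_NAMES) {
		model = qla2x00_model_name[index * 2];
		descr = qla2x00_model_name[index * 2 + 1];
	}
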
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 3af478663be7..a0a722cf4237 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -141,7 +141,7 @@ struct nvram_24xx {
 	 *	BIT 2  = Enable Memory Map BIOS
 	 *	BIT 3  = Enable Selectable Boot
 	 *	BIT 4  = Disable RISC code load
-	 *	BIT 5  =
+	 *	BIT 5  = Disable Serdes
 	 *	BIT 6  =
 	 *	BIT 7  =
 	 *
@@ -278,7 +278,7 @@ struct init_cb_24xx {
 	uint16_t response_q_length;
 	uint16_t request_q_length;
 
-	uint16_t link_down_timeout;	/* Milliseconds. */
+	uint16_t link_down_on_nos;	/* Milliseconds. */
 
 	uint16_t prio_request_q_length;
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 164d53ccbfd0..8311ac2b93a8 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -31,13 +31,9 @@ extern void qla2x00_update_fw_options(struct scsi_qla_host *);
 extern void qla24xx_update_fw_options(scsi_qla_host_t *);
 extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
 extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
-extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
-
-extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
 
 extern int qla2x00_loop_resync(scsi_qla_host_t *);
 
-extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
 extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
 extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
 
@@ -51,6 +47,8 @@ extern int qla2x00_abort_isp(scsi_qla_host_t *);
 extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 extern void qla2x00_reg_remote_port(scsi_qla_host_t *, fc_port_t *);
 
+extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -61,6 +59,8 @@ extern int qlport_down_retry;
 extern int ql2xplogiabsentdevice;
 extern int ql2xloginretrycount;
 extern int ql2xfdmienable;
+extern int ql2xallocfwdump;
+extern int extended_error_logging;
 
 extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
 
@@ -80,8 +80,6 @@ extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_iocb.c source file.
  */
-extern void qla2x00_isp_cmd(scsi_qla_host_t *);
-
 extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -204,6 +202,12 @@ qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
 extern int
 qla2x00_stop_firmware(scsi_qla_host_t *);
 
+extern int
+qla2x00_trace_control(scsi_qla_host_t *, uint16_t, dma_addr_t, uint16_t);
+
+extern int
+qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -254,9 +258,6 @@ extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
 extern void qla2100_fw_dump(scsi_qla_host_t *, int);
 extern void qla2300_fw_dump(scsi_qla_host_t *, int);
 extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla2100_ascii_fw_dump(scsi_qla_host_t *);
-extern void qla2300_ascii_fw_dump(scsi_qla_host_t *);
-extern void qla24xx_ascii_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
@@ -280,13 +281,6 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
 extern int qla2x00_fdmi_register(scsi_qla_host_t *);
 
 /*
- * Global Function Prototypes in qla_xioctl.c source file.
- */
-#define qla2x00_enqueue_aen(ha, cmd, mode)	do { } while (0)
-#define qla2x00_alloc_ioctl_mem(ha)		(0)
-#define qla2x00_free_ioctl_mem(ha)		do { } while (0)
-
-/*
  * Global Function Prototypes in qla_attr.c source file.
  */
 struct class_device_attribute;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aef093db597e..859649160caa 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -39,6 +39,8 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
 
 static int qla2x00_restart_isp(scsi_qla_host_t *);
 
+static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev);
+
 /****************************************************************************/
 /* QLogic ISP2x00 Hardware Support Functions. */
 /****************************************************************************/
@@ -89,6 +91,17 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 
 	ha->isp_ops.nvram_config(ha);
 
+	if (ha->flags.disable_serdes) {
+		/* Mask HBA via NVRAM settings? */
+		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
+		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
+		    ha->port_name[0], ha->port_name[1],
+		    ha->port_name[2], ha->port_name[3],
+		    ha->port_name[4], ha->port_name[5],
+		    ha->port_name[6], ha->port_name[7]);
+		return QLA_FUNCTION_FAILED;
+	}
+
 	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
 
 	retry = 10;
@@ -770,29 +783,104 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
 	return rval;
 }
 
-static void
+void
 qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
 {
-	uint32_t dump_size = 0;
+	int rval;
+	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+	    eft_size;
+	dma_addr_t eft_dma;
+	void *eft;
+
+	if (ha->fw_dump) {
+		qla_printk(KERN_WARNING, ha,
+		    "Firmware dump previously allocated.\n");
+		return;
+	}
 
 	ha->fw_dumped = 0;
+	fixed_size = mem_size = eft_size = 0;
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		dump_size = sizeof(struct qla2100_fw_dump);
+		fixed_size = sizeof(struct qla2100_fw_dump);
 	} else if (IS_QLA23XX(ha)) {
-		dump_size = sizeof(struct qla2300_fw_dump);
-		dump_size += (ha->fw_memory_size - 0x11000) * sizeof(uint16_t);
-	} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
-		dump_size = sizeof(struct qla24xx_fw_dump);
-		dump_size += (ha->fw_memory_size - 0x100000) * sizeof(uint32_t);
+		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+		    sizeof(uint16_t);
+	} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+		    sizeof(uint32_t);
+
+		/* Allocate memory for Extended Trace Buffer. */
+		eft = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &eft_dma,
+		    GFP_KERNEL);
+		if (!eft) {
+			qla_printk(KERN_WARNING, ha, "Unable to allocate "
+			    "(%d KB) for EFT.\n", EFT_SIZE / 1024);
+			goto cont_alloc;
+		}
+
+		rval = qla2x00_trace_control(ha, TC_ENABLE, eft_dma,
+		    EFT_NUM_BUFFERS);
+		if (rval) {
+			qla_printk(KERN_WARNING, ha, "Unable to initialize "
+			    "EFT (%d).\n", rval);
+			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, eft,
+			    eft_dma);
+			goto cont_alloc;
+		}
+
+		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
+		    EFT_SIZE / 1024);
+
+		eft_size = EFT_SIZE;
+		memset(eft, 0, eft_size);
+		ha->eft_dma = eft_dma;
+		ha->eft = eft;
 	}
+cont_alloc:
+	req_q_size = ha->request_q_length * sizeof(request_t);
+	rsp_q_size = ha->response_q_length * sizeof(response_t);
+
+	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
+	    eft_size;
 
 	ha->fw_dump = vmalloc(dump_size);
-	if (ha->fw_dump)
-		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware "
-		    "dump...\n", dump_size / 1024);
-	else
+	if (!ha->fw_dump) {
 		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
 		    "firmware dump!!!\n", dump_size / 1024);
+
+		if (ha->eft) {
+			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
+			    ha->eft_dma);
+			ha->eft = NULL;
+			ha->eft_dma = 0;
+		}
+		return;
+	}
+
+	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
+	    dump_size / 1024);
+
+	ha->fw_dump_len = dump_size;
+	ha->fw_dump->signature[0] = 'Q';
+	ha->fw_dump->signature[1] = 'L';
+	ha->fw_dump->signature[2] = 'G';
+	ha->fw_dump->signature[3] = 'C';
+	ha->fw_dump->version = __constant_htonl(1);
+
+	ha->fw_dump->fixed_size = htonl(fixed_size);
+	ha->fw_dump->mem_size = htonl(mem_size);
+	ha->fw_dump->req_q_size = htonl(req_q_size);
+	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+	ha->fw_dump->eft_size = htonl(eft_size);
+	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+	ha->fw_dump->header_size =
+	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
 }
 
 /**
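
qla2x00_alloc_fw_dump() now sizes a single vmalloc'd buffer as header + chip-specific fixed registers + RISC memory + both rings + EFT, using offsetof() so the flexible ext_mem[]/data_ram tail is not counted twice, and "+ 1" because fw_memory_size names the last addressable word rather than a count. For the ISP24xx branch the arithmetic, pulled out in isolation (a sketch using the patch's own constants and fields):

	size_t hdr   = offsetof(struct qla2xxx_fw_dump, isp);
	size_t fixed = offsetof(struct qla24xx_fw_dump, ext_mem);
	size_t mem   = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t);
	size_t rings = ha->request_q_length * sizeof(request_t) +
	               ha->response_q_length * sizeof(response_t);
	size_t total = hdr + fixed + mem + rings + EFT_SIZE;	/* == dump_size */
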
@@ -810,8 +898,6 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
 	dma_addr_t request_dma;
 	request_t *request_ring;
 
-	qla2x00_alloc_fw_dump(ha);
-
 	/* Valid only on recent ISPs. */
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return;
@@ -883,6 +969,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
 			    &ha->fw_subminor_version,
 			    &ha->fw_attributes, &ha->fw_memory_size);
 			qla2x00_resize_request_q(ha);
+
+			if (ql2xallocfwdump)
+				qla2x00_alloc_fw_dump(ha);
 		}
 	} else {
 		DEBUG2(printk(KERN_INFO
@@ -1186,8 +1275,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 			rval = QLA_FUNCTION_FAILED;
 
 			if (atomic_read(&ha->loop_down_timer) &&
-			    (fw_state >= FSTATE_LOSS_OF_SYNC ||
-			    fw_state == FSTATE_WAIT_AL_PA)) {
+			    fw_state != FSTATE_READY) {
 				/* Loop down. Timeout on min_wait for states
 				 * other than Wait for Login.
 				 */
@@ -1555,6 +1643,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 	/*
 	 * Set host adapter parameters.
 	 */
+	if (nv->host_p[0] & BIT_7)
+		extended_error_logging = 1;
 	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
 	/* Always load RISC code on non ISP2[12]00 chips. */
 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -1563,6 +1653,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
 	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
 	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
+	ha->flags.disable_serdes = 0;
 
 	ha->operating_mode =
 	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -1701,7 +1792,7 @@ qla2x00_rport_del(void *data)
  *
  * Returns a pointer to the allocated fcport, or NULL, if none available.
  */
-fc_port_t *
+static fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
 {
 	fc_port_t *fcport;
@@ -2258,8 +2349,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 		}
 
 		/* Remove device from the new list and add it to DB */
-		list_del(&fcport->list);
-		list_add_tail(&fcport->list, &ha->fcports);
+		list_move_tail(&fcport->list, &ha->fcports);
 
 		/* Login and update database */
 		qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
@@ -2498,7 +2588,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
  * Context:
  *	Kernel context.
  */
-int
+static int
 qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
 {
 	int	rval;
@@ -2973,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
 int
 qla2x00_abort_isp(scsi_qla_host_t *ha)
 {
+	int rval;
 	unsigned long flags = 0;
 	uint16_t cnt;
 	srb_t *sp;
@@ -3029,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 
 			ha->isp_abort_cnt = 0;
 			clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+
+			if (ha->eft) {
+				rval = qla2x00_trace_control(ha, TC_ENABLE,
+				    ha->eft_dma, EFT_NUM_BUFFERS);
+				if (rval) {
+					qla_printk(KERN_WARNING, ha,
+					    "Unable to reinitialize EFT "
+					    "(%d).\n", rval);
+				}
+			}
 		} else {	/* failed the ISP abort */
 			ha->flags.online = 1;
 			if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
@@ -3049,14 +3150,14 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 					ha->isp_abort_cnt--;
 					DEBUG(printk("qla%ld: ISP abort - "
 					    "retry remaining %d\n",
-					    ha->host_no, ha->isp_abort_cnt);)
+					    ha->host_no, ha->isp_abort_cnt));
 					status = 1;
 				}
 			} else {
 				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
 				DEBUG(printk("qla2x00(%ld): ISP error recovery "
 				    "- retrying (%d) more times\n",
-				    ha->host_no, ha->isp_abort_cnt);)
+				    ha->host_no, ha->isp_abort_cnt));
 				set_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
 				status = 1;
 			}
@@ -3070,7 +3171,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 	} else {
 		DEBUG(printk(KERN_INFO
 		    "qla2x00_abort_isp(%ld): exiting.\n",
-		    ha->host_no);)
+		    ha->host_no));
 	}
 
 	return(status);
@@ -3146,7 +3247,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
 		if (!(status = qla2x00_fw_ready(ha))) {
 			DEBUG(printk("%s(): Start configure loop, "
-			    "status = %d\n", __func__, status);)
+			    "status = %d\n", __func__, status));
 
 			/* Issue a marker after FW becomes ready. */
 			qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
@@ -3170,7 +3271,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 
 	DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
 	    __func__,
-	    status);)
+	    status));
 	}
 	return (status);
 }
@@ -3290,7 +3391,6 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		nv->node_name[6] = 0x55;
 		nv->node_name[7] = 0x86;
 		nv->login_retry_count = __constant_cpu_to_le16(8);
-		nv->link_down_timeout = __constant_cpu_to_le16(200);
 		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
 		nv->login_timeout = __constant_cpu_to_le16(0);
 		nv->firmware_options_1 =
@@ -3319,7 +3419,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		*dptr1++ = *dptr2++;
 
 	icb->login_retry_count = nv->login_retry_count;
-	icb->link_down_timeout = nv->link_down_timeout;
+	icb->link_down_on_nos = nv->link_down_on_nos;
 
 	/* Copy 2nd segment. */
 	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
@@ -3374,6 +3474,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	ha->flags.enable_lip_full_login = 1;
 	ha->flags.enable_target_reset = 1;
 	ha->flags.enable_led_scheme = 0;
+	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
 
 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -3473,7 +3574,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	return (rval);
 }
 
-int
+static int
 qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 {
 	int rval;
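
Both nvram_config paths above now honor new host-parameter bits: on ISP2xxx parts, BIT_7 of the byte-wide host_p[0] switches on extended_error_logging (the same flag the reworked qla_dbg.h macros test), while on ISP24xx, BIT_5 of the little-endian 32-bit host_p word sets flags.disable_serdes, matching the "BIT 5 = Disable Serdes" NVRAM comment added in qla_fw.h; qla2x00_initialize_adapter() then refuses to bring the port up and logs the masked WWPN. In sketch form (variable names nv2xxx/nv24xx are illustrative):

	/* ISP2xxx NVRAM: byte-wide host_p[] array */
	if (nv2xxx->host_p[0] & BIT_7)
		extended_error_logging = 1;

	/* ISP24xx NVRAM: little-endian 32-bit host_p word */
	ha->flags.disable_serdes = (le32_to_cpu(nv24xx->host_p) & BIT_5) ? 1 : 0;
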
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8c769cfaa14c..c5b3c610a32a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
15static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *); 15static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
16static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *); 16static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
17static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); 17static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
18static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
18 19
19/** 20/**
20 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 21 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -470,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
470 mrk24->nport_handle = cpu_to_le16(loop_id); 471 mrk24->nport_handle = cpu_to_le16(loop_id);
471 mrk24->lun[1] = LSB(lun); 472 mrk24->lun[1] = LSB(lun);
472 mrk24->lun[2] = MSB(lun); 473 mrk24->lun[2] = MSB(lun);
474 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
473 } else { 475 } else {
474 SET_TARGET_ID(ha, mrk->target, loop_id); 476 SET_TARGET_ID(ha, mrk->target, loop_id);
475 mrk->lun = cpu_to_le16(lun); 477 mrk->lun = cpu_to_le16(lun);
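The added host_to_fcp_swap() call puts the 8-byte LUN field into FCP wire order before the marker IOCB is issued. A sketch of the assumed behavior, byte-swapping each 32-bit word of the buffer in place (the driver's own version lives in qla_inline.h):

	#include <linux/swab.h>

	static inline uint8_t *lun_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
	{
		uint32_t *p = (uint32_t *)fcp;
		uint32_t iter = bsize >> 2;	/* whole 32-bit words */

		for (; iter; iter--, p++)
			*p = swab32(*p);	/* flip byte order in place */
		return fcp;
	}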
@@ -574,7 +576,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
574 * 576 *
575 * Note: The caller must hold the hardware lock before calling this routine. 577 * Note: The caller must hold the hardware lock before calling this routine.
576 */ 578 */
577void 579static void
578qla2x00_isp_cmd(scsi_qla_host_t *ha) 580qla2x00_isp_cmd(scsi_qla_host_t *ha)
579{ 581{
580 device_reg_t __iomem *reg = ha->iobase; 582 device_reg_t __iomem *reg = ha->iobase;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b28ac0a27e25..de0613135f70 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -395,10 +395,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
395 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 395 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
396 396
397 ha->flags.management_server_logged_in = 0; 397 ha->flags.management_server_logged_in = 0;
398
399 /* Update AEN queue. */
400 qla2x00_enqueue_aen(ha, MBA_LIP_OCCURRED, NULL);
401
402 break; 398 break;
403 399
404 case MBA_LOOP_UP: /* Loop Up Event */ 400 case MBA_LOOP_UP: /* Loop Up Event */
@@ -418,9 +414,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
418 link_speed); 414 link_speed);
419 415
420 ha->flags.management_server_logged_in = 0; 416 ha->flags.management_server_logged_in = 0;
421
422 /* Update AEN queue. */
423 qla2x00_enqueue_aen(ha, MBA_LOOP_UP, NULL);
424 break; 417 break;
425 418
426 case MBA_LOOP_DOWN: /* Loop Down Event */ 419 case MBA_LOOP_DOWN: /* Loop Down Event */
@@ -439,9 +432,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
439 ha->link_data_rate = LDR_UNKNOWN; 432 ha->link_data_rate = LDR_UNKNOWN;
440 if (ql2xfdmienable) 433 if (ql2xfdmienable)
441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 434 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
442
443 /* Update AEN queue. */
444 qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);
445 break; 435 break;
446 436
447 case MBA_LIP_RESET: /* LIP reset occurred */ 437 case MBA_LIP_RESET: /* LIP reset occurred */
@@ -460,10 +450,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
460 450
461 ha->operating_mode = LOOP; 451 ha->operating_mode = LOOP;
462 ha->flags.management_server_logged_in = 0; 452 ha->flags.management_server_logged_in = 0;
463
464 /* Update AEN queue. */
465 qla2x00_enqueue_aen(ha, MBA_LIP_RESET, NULL);
466
467 break; 453 break;
468 454
469 case MBA_POINT_TO_POINT: /* Point-to-Point */ 455 case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -545,9 +531,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
545 531
546 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 532 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
547 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 533 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
548
549 /* Update AEN queue. */
550 qla2x00_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
551 break; 534 break;
552 535
553 case MBA_RSCN_UPDATE: /* State Change Registration */ 536 case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -584,9 +567,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
584 567
585 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 568 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
586 set_bit(RSCN_UPDATE, &ha->dpc_flags); 569 set_bit(RSCN_UPDATE, &ha->dpc_flags);
587
588 /* Update AEN queue. */
589 qla2x00_enqueue_aen(ha, MBA_RSCN_UPDATE, &mb[0]);
590 break; 570 break;
591 571
592 /* case MBA_RIO_RESPONSE: */ 572 /* case MBA_RIO_RESPONSE: */
@@ -607,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
607 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
608 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
609 break; 589 break;
590
591 case MBA_TRACE_NOTIFICATION:
592 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
593 ha->host_no, mb[1], mb[2]));
594 break;
610 } 595 }
611} 596}
612 597
@@ -1452,8 +1437,8 @@ qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
1452 DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n", 1437 DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
1453 __func__, ha->host_no, pkt, pkt->handle)); 1438 __func__, ha->host_no, pkt, pkt->handle));
1454 1439
1455 DEBUG9(printk("%s: ct pkt dump:\n", __func__);) 1440 DEBUG9(printk("%s: ct pkt dump:\n", __func__));
1456 DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx));) 1441 DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));
1457 1442
1458 /* Validate handle. */ 1443 /* Validate handle. */
1459 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1444 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d6cb3bd1a29a..879f281e2ea2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -13,13 +13,13 @@ qla2x00_mbx_sem_timeout(unsigned long data)
13{ 13{
14 struct semaphore *sem_ptr = (struct semaphore *)data; 14 struct semaphore *sem_ptr = (struct semaphore *)data;
15 15
16 DEBUG11(printk("qla2x00_sem_timeout: entered.\n");) 16 DEBUG11(printk("qla2x00_sem_timeout: entered.\n"));
17 17
18 if (sem_ptr != NULL) { 18 if (sem_ptr != NULL) {
19 up(sem_ptr); 19 up(sem_ptr);
20 } 20 }
21 21
22 DEBUG11(printk("qla2x00_mbx_sem_timeout: exiting.\n");) 22 DEBUG11(printk("qla2x00_mbx_sem_timeout: exiting.\n"));
23} 23}
24 24
25/* 25/*
@@ -61,7 +61,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
61 rval = QLA_SUCCESS; 61 rval = QLA_SUCCESS;
62 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 62 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
63 63
64 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 64 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
65 65
66 /* 66 /*
67 * Wait for active mailbox commands to finish by waiting at most tov 67 * Wait for active mailbox commands to finish by waiting at most tov
@@ -72,7 +72,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
72 if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) { 72 if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) {
73 /* Timeout occurred. Return error. */ 73 /* Timeout occurred. Return error. */
74 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 74 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
75 "Exiting.\n", __func__, ha->host_no);) 75 "Exiting.\n", __func__, ha->host_no));
76 return QLA_FUNCTION_TIMEOUT; 76 return QLA_FUNCTION_TIMEOUT;
77 } 77 }
78 } 78 }
@@ -86,7 +86,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
86 spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags); 86 spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);
87 87
88 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 88 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
89 ha->host_no, mcp->mb[0]);) 89 ha->host_no, mcp->mb[0]));
90 90
91 spin_lock_irqsave(&ha->hardware_lock, flags); 91 spin_lock_irqsave(&ha->hardware_lock, flags);
92 92
@@ -131,14 +131,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
131 131
132 /* Unlock mbx registers and wait for interrupt */ 132 /* Unlock mbx registers and wait for interrupt */
133 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 133 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
134 "jiffies=%lx.\n", __func__, ha->host_no, jiffies);) 134 "jiffies=%lx.\n", __func__, ha->host_no, jiffies));
135 135
136 /* Wait for mbx cmd completion until timeout */ 136 /* Wait for mbx cmd completion until timeout */
137 137
138 if (!abort_active && io_lock_on) { 138 if (!abort_active && io_lock_on) {
139 /* sleep on completion semaphore */ 139 /* sleep on completion semaphore */
140 DEBUG11(printk("%s(%ld): INTERRUPT MODE. Initializing timer.\n", 140 DEBUG11(printk("%s(%ld): INTERRUPT MODE. Initializing timer.\n",
141 __func__, ha->host_no);) 141 __func__, ha->host_no));
142 142
143 init_timer(&tmp_intr_timer); 143 init_timer(&tmp_intr_timer);
144 tmp_intr_timer.data = (unsigned long)&ha->mbx_intr_sem; 144 tmp_intr_timer.data = (unsigned long)&ha->mbx_intr_sem;
@@ -147,11 +147,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
147 (void (*)(unsigned long))qla2x00_mbx_sem_timeout; 147 (void (*)(unsigned long))qla2x00_mbx_sem_timeout;
148 148
149 DEBUG11(printk("%s(%ld): Adding timer.\n", __func__, 149 DEBUG11(printk("%s(%ld): Adding timer.\n", __func__,
150 ha->host_no);) 150 ha->host_no));
151 add_timer(&tmp_intr_timer); 151 add_timer(&tmp_intr_timer);
152 152
153 DEBUG11(printk("%s(%ld): going to unlock & sleep. " 153 DEBUG11(printk("%s(%ld): going to unlock & sleep. "
154 "time=0x%lx.\n", __func__, ha->host_no, jiffies);) 154 "time=0x%lx.\n", __func__, ha->host_no, jiffies));
155 155
156 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 156 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
157 157
@@ -170,14 +170,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
170 down(&ha->mbx_intr_sem); 170 down(&ha->mbx_intr_sem);
171 171
172 DEBUG11(printk("%s(%ld): waking up. time=0x%lx\n", __func__, 172 DEBUG11(printk("%s(%ld): waking up. time=0x%lx\n", __func__,
173 ha->host_no, jiffies);) 173 ha->host_no, jiffies));
174 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 174 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
175 175
176 /* delete the timer */ 176 /* delete the timer */
177 del_timer(&tmp_intr_timer); 177 del_timer(&tmp_intr_timer);
178 } else { 178 } else {
179 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 179 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
180 ha->host_no, command);) 180 ha->host_no, command));
181 181
182 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 182 if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
183 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 183 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -209,7 +209,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
209 uint16_t *iptr2; 209 uint16_t *iptr2;
210 210
211 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 211 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
212 ha->host_no, command);) 212 ha->host_no, command));
213 213
214 /* Got interrupt. Clear the flag. */ 214 /* Got interrupt. Clear the flag. */
215 ha->flags.mbox_int = 0; 215 ha->flags.mbox_int = 0;
@@ -266,7 +266,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
266 266
267 if (!abort_active) { 267 if (!abort_active) {
268 DEBUG11(printk("%s(%ld): checking for additional resp " 268 DEBUG11(printk("%s(%ld): checking for additional resp "
269 "interrupt.\n", __func__, ha->host_no);) 269 "interrupt.\n", __func__, ha->host_no));
270 270
271 /* polling mode for non isp_abort commands. */ 271 /* polling mode for non isp_abort commands. */
272 qla2x00_poll(ha); 272 qla2x00_poll(ha);
@@ -277,9 +277,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
277 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 277 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
278 /* not in dpc. schedule it for dpc to take over. */ 278 /* not in dpc. schedule it for dpc to take over. */
279 DEBUG(printk("%s(%ld): timeout schedule " 279 DEBUG(printk("%s(%ld): timeout schedule "
280 "isp_abort_needed.\n", __func__, ha->host_no);) 280 "isp_abort_needed.\n", __func__, ha->host_no));
281 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 281 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
282 "isp_abort_needed.\n", __func__, ha->host_no);) 282 "isp_abort_needed.\n", __func__, ha->host_no));
283 qla_printk(KERN_WARNING, ha, 283 qla_printk(KERN_WARNING, ha,
284 "Mailbox command timeout occured. Scheduling ISP " 284 "Mailbox command timeout occured. Scheduling ISP "
285 "abort.\n"); 285 "abort.\n");
@@ -288,9 +288,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
288 } else if (!abort_active) { 288 } else if (!abort_active) {
289 /* call abort directly since we are in the DPC thread */ 289 /* call abort directly since we are in the DPC thread */
290 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 290 DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
291 __func__, ha->host_no);) 291 __func__, ha->host_no));
292 DEBUG2_3_11(printk("%s(%ld): timeout calling " 292 DEBUG2_3_11(printk("%s(%ld): timeout calling "
293 "abort_isp\n", __func__, ha->host_no);) 293 "abort_isp\n", __func__, ha->host_no));
294 qla_printk(KERN_WARNING, ha, 294 qla_printk(KERN_WARNING, ha,
295 "Mailbox command timeout occured. Issuing ISP " 295 "Mailbox command timeout occured. Issuing ISP "
296 "abort.\n"); 296 "abort.\n");
@@ -303,9 +303,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
303 } 303 }
304 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 304 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
305 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__, 305 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
306 ha->host_no);) 306 ha->host_no));
307 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n", 307 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
308 __func__, ha->host_no);) 308 __func__, ha->host_no));
309 } 309 }
310 } 310 }
311 311
@@ -316,9 +316,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
316 if (rval) { 316 if (rval) {
317 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 317 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
318 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no, 318 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no,
319 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);) 319 mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
320 } else { 320 } else {
321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
322 } 322 }
323 323
324 return rval; 324 return rval;
@@ -394,7 +394,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
394 mbx_cmd_t mc; 394 mbx_cmd_t mc;
395 mbx_cmd_t *mcp = &mc; 395 mbx_cmd_t *mcp = &mc;
396 396
397 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 397 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
398 398
399 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 399 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
400 mcp->out_mb = MBX_0; 400 mcp->out_mb = MBX_0;
@@ -424,10 +424,10 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
424 } else { 424 } else {
425 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 425 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
426 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 426 DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
427 __func__, ha->host_no, mcp->mb[1]);) 427 __func__, ha->host_no, mcp->mb[1]));
428 } else { 428 } else {
429 DEBUG11(printk("%s(%ld): done.\n", __func__, 429 DEBUG11(printk("%s(%ld): done.\n", __func__,
430 ha->host_no);) 430 ha->host_no));
431 } 431 }
432 } 432 }
433 433
@@ -611,7 +611,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
611 mbx_cmd_t mc; 611 mbx_cmd_t mc;
612 mbx_cmd_t *mcp = &mc; 612 mbx_cmd_t *mcp = &mc;
613 613
614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no);) 614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no));
615 615
616 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 616 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
617 mcp->mb[1] = 0xAAAA; 617 mcp->mb[1] = 0xAAAA;
@@ -639,11 +639,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
639 if (rval != QLA_SUCCESS) { 639 if (rval != QLA_SUCCESS) {
640 /*EMPTY*/ 640 /*EMPTY*/
641 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 641 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
642 ha->host_no, rval);) 642 ha->host_no, rval));
643 } else { 643 } else {
644 /*EMPTY*/ 644 /*EMPTY*/
645 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 645 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
646 ha->host_no);) 646 ha->host_no));
647 } 647 }
648 648
649 return rval; 649 return rval;
@@ -671,7 +671,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
671 mbx_cmd_t mc; 671 mbx_cmd_t mc;
672 mbx_cmd_t *mcp = &mc; 672 mbx_cmd_t *mcp = &mc;
673 673
674 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 674 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
675 675
676 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 676 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
677 mcp->out_mb = MBX_0; 677 mcp->out_mb = MBX_0;
@@ -694,9 +694,9 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
694 if (rval != QLA_SUCCESS) { 694 if (rval != QLA_SUCCESS) {
695 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 695 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
696 ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ? 696 ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
697 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));) 697 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])));
698 } else { 698 } else {
699 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 699 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
700 } 700 }
701 701
702 return rval; 702 return rval;
@@ -743,9 +743,9 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr,
743 if (rval != QLA_SUCCESS) { 743 if (rval != QLA_SUCCESS) {
744 /*EMPTY*/ 744 /*EMPTY*/
745 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 745 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
746 ha->host_no, rval);) 746 ha->host_no, rval));
747 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 747 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
748 ha->host_no, rval);) 748 ha->host_no, rval));
749 } else { 749 } else {
750 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 750 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
751 751
@@ -781,7 +781,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
781 mbx_cmd_t mc; 781 mbx_cmd_t mc;
782 mbx_cmd_t *mcp = &mc; 782 mbx_cmd_t *mcp = &mc;
783 783
784 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no);) 784 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no));
785 785
786 fcport = sp->fcport; 786 fcport = sp->fcport;
787 787
@@ -813,11 +813,11 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
813 813
814 if (rval != QLA_SUCCESS) { 814 if (rval != QLA_SUCCESS) {
815 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 815 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
816 ha->host_no, rval);) 816 ha->host_no, rval));
817 } else { 817 } else {
818 sp->flags |= SRB_ABORT_PENDING; 818 sp->flags |= SRB_ABORT_PENDING;
819 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 819 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
820 ha->host_no);) 820 ha->host_no));
821 } 821 }
822 822
823 return rval; 823 return rval;
@@ -848,7 +848,7 @@ qla2x00_abort_target(fc_port_t *fcport)
848 if (fcport == NULL) 848 if (fcport == NULL)
849 return 0; 849 return 0;
850 850
851 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no);) 851 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
852 852
853 ha = fcport->ha; 853 ha = fcport->ha;
854 mcp->mb[0] = MBC_ABORT_TARGET; 854 mcp->mb[0] = MBC_ABORT_TARGET;
@@ -872,11 +872,11 @@ qla2x00_abort_target(fc_port_t *fcport)
872 872
873 if (rval != QLA_SUCCESS) { 873 if (rval != QLA_SUCCESS) {
874 DEBUG2_3_11(printk("qla2x00_abort_target(%ld): failed=%x.\n", 874 DEBUG2_3_11(printk("qla2x00_abort_target(%ld): failed=%x.\n",
875 ha->host_no, rval);) 875 ha->host_no, rval));
876 } else { 876 } else {
877 /*EMPTY*/ 877 /*EMPTY*/
878 DEBUG11(printk("qla2x00_abort_target(%ld): done.\n", 878 DEBUG11(printk("qla2x00_abort_target(%ld): done.\n",
879 ha->host_no);) 879 ha->host_no));
880 } 880 }
881 881
882 return rval; 882 return rval;
@@ -912,7 +912,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
912 mbx_cmd_t *mcp = &mc; 912 mbx_cmd_t *mcp = &mc;
913 913
914 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 914 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
915 ha->host_no);) 915 ha->host_no));
916 916
917 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 917 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
918 mcp->out_mb = MBX_0; 918 mcp->out_mb = MBX_0;
@@ -933,11 +933,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
933 if (rval != QLA_SUCCESS) { 933 if (rval != QLA_SUCCESS) {
934 /*EMPTY*/ 934 /*EMPTY*/
935 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 935 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
936 ha->host_no, rval);) 936 ha->host_no, rval));
937 } else { 937 } else {
938 /*EMPTY*/ 938 /*EMPTY*/
939 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 939 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
940 ha->host_no);) 940 ha->host_no));
941 } 941 }
942 942
943 return rval; 943 return rval;
@@ -968,7 +968,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
968 mbx_cmd_t *mcp = &mc; 968 mbx_cmd_t *mcp = &mc;
969 969
970 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 970 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
971 ha->host_no);) 971 ha->host_no));
972 972
973 mcp->mb[0] = MBC_GET_RETRY_COUNT; 973 mcp->mb[0] = MBC_GET_RETRY_COUNT;
974 mcp->out_mb = MBX_0; 974 mcp->out_mb = MBX_0;
@@ -980,7 +980,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
980 if (rval != QLA_SUCCESS) { 980 if (rval != QLA_SUCCESS) {
981 /*EMPTY*/ 981 /*EMPTY*/
982 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 982 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
983 ha->host_no, mcp->mb[0]);) 983 ha->host_no, mcp->mb[0]));
984 } else { 984 } else {
985 /* Convert returned data and check our values. */ 985 /* Convert returned data and check our values. */
986 *r_a_tov = mcp->mb[3] / 2; 986 *r_a_tov = mcp->mb[3] / 2;
@@ -992,7 +992,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
992 } 992 }
993 993
994 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 994 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
995 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov);) 995 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov));
996 } 996 }
997 997
998 return rval; 998 return rval;
@@ -1023,7 +1023,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1023 mbx_cmd_t *mcp = &mc; 1023 mbx_cmd_t *mcp = &mc;
1024 1024
1025 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1025 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1026 ha->host_no);) 1026 ha->host_no));
1027 1027
1028 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1028 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1029 mcp->mb[2] = MSW(ha->init_cb_dma); 1029 mcp->mb[2] = MSW(ha->init_cb_dma);
@@ -1043,11 +1043,11 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1043 /*EMPTY*/ 1043 /*EMPTY*/
1044 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1044 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
1045 "mb0=%x.\n", 1045 "mb0=%x.\n",
1046 ha->host_no, rval, mcp->mb[0]);) 1046 ha->host_no, rval, mcp->mb[0]));
1047 } else { 1047 } else {
1048 /*EMPTY*/ 1048 /*EMPTY*/
1049 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1049 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
1050 ha->host_no);) 1050 ha->host_no));
1051 } 1051 }
1052 1052
1053 return rval; 1053 return rval;
@@ -1079,7 +1079,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1079 struct port_database_24xx *pd24; 1079 struct port_database_24xx *pd24;
1080 dma_addr_t pd_dma; 1080 dma_addr_t pd_dma;
1081 1081
1082 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1082 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1083 1083
1084 pd24 = NULL; 1084 pd24 = NULL;
1085 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1085 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1220,7 +1220,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr)
1220 mbx_cmd_t *mcp = &mc; 1220 mbx_cmd_t *mcp = &mc;
1221 1221
1222 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1222 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
1223 ha->host_no);) 1223 ha->host_no));
1224 1224
1225 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1225 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1226 mcp->out_mb = MBX_0; 1226 mcp->out_mb = MBX_0;
@@ -1235,11 +1235,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr)
1235 if (rval != QLA_SUCCESS) { 1235 if (rval != QLA_SUCCESS) {
1236 /*EMPTY*/ 1236 /*EMPTY*/
1237 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1237 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
1238 "failed=%x.\n", ha->host_no, rval);) 1238 "failed=%x.\n", ha->host_no, rval));
1239 } else { 1239 } else {
1240 /*EMPTY*/ 1240 /*EMPTY*/
1241 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1241 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
1242 ha->host_no);) 1242 ha->host_no));
1243 } 1243 }
1244 1244
1245 return rval; 1245 return rval;
@@ -1272,7 +1272,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1272 mbx_cmd_t *mcp = &mc; 1272 mbx_cmd_t *mcp = &mc;
1273 1273
1274 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1274 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
1275 ha->host_no);) 1275 ha->host_no));
1276 1276
1277 mcp->mb[0] = MBC_GET_PORT_NAME; 1277 mcp->mb[0] = MBC_GET_PORT_NAME;
1278 mcp->out_mb = MBX_1|MBX_0; 1278 mcp->out_mb = MBX_1|MBX_0;
@@ -1292,7 +1292,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1292 if (rval != QLA_SUCCESS) { 1292 if (rval != QLA_SUCCESS) {
1293 /*EMPTY*/ 1293 /*EMPTY*/
1294 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1294 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
1295 ha->host_no, rval);) 1295 ha->host_no, rval));
1296 } else { 1296 } else {
1297 if (name != NULL) { 1297 if (name != NULL) {
1298 /* This function returns name in big endian. */ 1298 /* This function returns name in big endian. */
@@ -1307,7 +1307,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1307 } 1307 }
1308 1308
1309 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1309 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
1310 ha->host_no);) 1310 ha->host_no));
1311 } 1311 }
1312 1312
1313 return rval; 1313 return rval;
@@ -1335,7 +1335,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1335 mbx_cmd_t mc; 1335 mbx_cmd_t mc;
1336 mbx_cmd_t *mcp = &mc; 1336 mbx_cmd_t *mcp = &mc;
1337 1337
1338 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1338 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1339 1339
1340 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1340 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1341 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1341 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
@@ -1364,10 +1364,10 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1364 if (rval != QLA_SUCCESS) { 1364 if (rval != QLA_SUCCESS) {
1365 /*EMPTY*/ 1365 /*EMPTY*/
1366 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1366 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
1367 __func__, ha->host_no, rval);) 1367 __func__, ha->host_no, rval));
1368 } else { 1368 } else {
1369 /*EMPTY*/ 1369 /*EMPTY*/
1370 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1370 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
1371 } 1371 }
1372 1372
1373 return rval; 1373 return rval;
@@ -1400,10 +1400,10 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1400 mbx_cmd_t *mcp = &mc; 1400 mbx_cmd_t *mcp = &mc;
1401 1401
1402 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1402 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
1403 ha->host_no);) 1403 ha->host_no));
1404 1404
1405 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1405 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
1406 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov);) 1406 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov));
1407 1407
1408 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1408 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1409 mcp->mb[1] = cmd_size; 1409 mcp->mb[1] = cmd_size;
@@ -1421,12 +1421,12 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1421 if (rval != QLA_SUCCESS) { 1421 if (rval != QLA_SUCCESS) {
1422 /*EMPTY*/ 1422 /*EMPTY*/
1423 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1423 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1424 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]);) 1424 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1425 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1425 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1426 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]);) 1426 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1427 } else { 1427 } else {
1428 /*EMPTY*/ 1428 /*EMPTY*/
1429 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no);) 1429 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no));
1430 } 1430 }
1431 1431
1432 return rval; 1432 return rval;
@@ -1442,7 +1442,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1442 dma_addr_t lg_dma; 1442 dma_addr_t lg_dma;
1443 uint32_t iop[2]; 1443 uint32_t iop[2];
1444 1444
1445 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1445 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1446 1446
1447 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1447 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1448 if (lg == NULL) { 1448 if (lg == NULL) {
@@ -1458,13 +1458,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1458 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1458 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1459 if (opt & BIT_0) 1459 if (opt & BIT_0)
1460 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI); 1460 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1461 if (opt & BIT_1)
1462 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1461 lg->port_id[0] = al_pa; 1463 lg->port_id[0] = al_pa;
1462 lg->port_id[1] = area; 1464 lg->port_id[1] = area;
1463 lg->port_id[2] = domain; 1465 lg->port_id[2] = domain;
1464 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1466 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1465 if (rval != QLA_SUCCESS) { 1467 if (rval != QLA_SUCCESS) {
1466 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1468 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
1467 "(%x).\n", __func__, ha->host_no, rval);) 1469 "(%x).\n", __func__, ha->host_no, rval));
1468 } else if (lg->entry_status != 0) { 1470 } else if (lg->entry_status != 0) {
1469 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1471 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1470 "-- error status (%x).\n", __func__, ha->host_no, 1472 "-- error status (%x).\n", __func__, ha->host_no,
@@ -1505,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1505 break; 1507 break;
1506 } 1508 }
1507 } else { 1509 } else {
1508 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1510 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
1509 1511
1510 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1512 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1511 1513
@@ -1559,7 +1561,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1559 mbx_cmd_t mc; 1561 mbx_cmd_t mc;
1560 mbx_cmd_t *mcp = &mc; 1562 mbx_cmd_t *mcp = &mc;
1561 1563
1562 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no);) 1564 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no));
1563 1565
1564 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1566 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1565 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1567 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1604,11 +1606,11 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1604 /*EMPTY*/ 1606 /*EMPTY*/
1605 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1607 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
1606 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval, 1608 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval,
1607 mcp->mb[0], mcp->mb[1], mcp->mb[2]);) 1609 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
1608 } else { 1610 } else {
1609 /*EMPTY*/ 1611 /*EMPTY*/
1610 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1612 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
1611 ha->host_no);) 1613 ha->host_no));
1612 } 1614 }
1613 1615
1614 return rval; 1616 return rval;
@@ -1643,7 +1645,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1643 fcport->d_id.b.domain, fcport->d_id.b.area, 1645 fcport->d_id.b.domain, fcport->d_id.b.area,
1644 fcport->d_id.b.al_pa, mb_ret, opt); 1646 fcport->d_id.b.al_pa, mb_ret, opt);
1645 1647
1646 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1648 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1647 1649
1648 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1650 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1649 if (HAS_EXTENDED_IDS(ha)) 1651 if (HAS_EXTENDED_IDS(ha))
@@ -1677,13 +1679,13 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1677 1679
1678 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1680 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1679 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1681 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
1680 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);) 1682 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1681 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1683 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1682 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1684 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
1683 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);) 1685 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1684 } else { 1686 } else {
1685 /*EMPTY*/ 1687 /*EMPTY*/
1686 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1688 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no));
1687 } 1689 }
1688 1690
1689 return (rval); 1691 return (rval);
@@ -1697,7 +1699,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1697 struct logio_entry_24xx *lg; 1699 struct logio_entry_24xx *lg;
1698 dma_addr_t lg_dma; 1700 dma_addr_t lg_dma;
1699 1701
1700 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1702 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1701 1703
1702 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1704 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1703 if (lg == NULL) { 1705 if (lg == NULL) {
@@ -1718,7 +1720,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1718 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1720 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1719 if (rval != QLA_SUCCESS) { 1721 if (rval != QLA_SUCCESS) {
1720 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1722 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
1721 "(%x).\n", __func__, ha->host_no, rval);) 1723 "(%x).\n", __func__, ha->host_no, rval));
1722 } else if (lg->entry_status != 0) { 1724 } else if (lg->entry_status != 0) {
1723 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1725 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1724 "-- error status (%x).\n", __func__, ha->host_no, 1726 "-- error status (%x).\n", __func__, ha->host_no,
@@ -1729,10 +1731,10 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1729 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1731 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1730 ha->host_no, le16_to_cpu(lg->comp_status), 1732 ha->host_no, le16_to_cpu(lg->comp_status),
1731 le32_to_cpu(lg->io_parameter[0]), 1733 le32_to_cpu(lg->io_parameter[0]),
1732 le32_to_cpu(lg->io_parameter[1]));) 1734 le32_to_cpu(lg->io_parameter[1])));
1733 } else { 1735 } else {
1734 /*EMPTY*/ 1736 /*EMPTY*/
1735 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1737 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
1736 } 1738 }
1737 1739
1738 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1740 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1765,7 +1767,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1765 mbx_cmd_t *mcp = &mc; 1767 mbx_cmd_t *mcp = &mc;
1766 1768
1767 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1769 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
1768 ha->host_no);) 1770 ha->host_no));
1769 1771
1770 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1772 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1771 mcp->out_mb = MBX_1|MBX_0; 1773 mcp->out_mb = MBX_1|MBX_0;
@@ -1785,11 +1787,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1785 if (rval != QLA_SUCCESS) { 1787 if (rval != QLA_SUCCESS) {
1786 /*EMPTY*/ 1788 /*EMPTY*/
1787 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1789 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
1788 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1]);) 1790 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1]));
1789 } else { 1791 } else {
1790 /*EMPTY*/ 1792 /*EMPTY*/
1791 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1793 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
1792 ha->host_no);) 1794 ha->host_no));
1793 } 1795 }
1794 1796
1795 return rval; 1797 return rval;
@@ -1818,7 +1820,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1818 mbx_cmd_t *mcp = &mc; 1820 mbx_cmd_t *mcp = &mc;
1819 1821
1820 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1822 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1821 ha->host_no);) 1823 ha->host_no));
1822 1824
1823 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1825 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1824 mcp->mb[1] = 0; 1826 mcp->mb[1] = 0;
@@ -1833,11 +1835,11 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1833 if (rval != QLA_SUCCESS) { 1835 if (rval != QLA_SUCCESS) {
1834 /*EMPTY*/ 1836 /*EMPTY*/
1835 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1837 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
1836 ha->host_no, rval);) 1838 ha->host_no, rval));
1837 } else { 1839 } else {
1838 /*EMPTY*/ 1840 /*EMPTY*/
1839 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1841 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
1840 ha->host_no);) 1842 ha->host_no));
1841 } 1843 }
1842 1844
1843 return rval; 1845 return rval;
@@ -1864,7 +1866,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1864 mbx_cmd_t *mcp = &mc; 1866 mbx_cmd_t *mcp = &mc;
1865 1867
1866 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 1868 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
1867 ha->host_no);) 1869 ha->host_no));
1868 1870
1869 if (id_list == NULL) 1871 if (id_list == NULL)
1870 return QLA_FUNCTION_FAILED; 1872 return QLA_FUNCTION_FAILED;
@@ -1893,11 +1895,11 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1893 if (rval != QLA_SUCCESS) { 1895 if (rval != QLA_SUCCESS) {
1894 /*EMPTY*/ 1896 /*EMPTY*/
1895 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 1897 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
1896 ha->host_no, rval);) 1898 ha->host_no, rval));
1897 } else { 1899 } else {
1898 *entries = mcp->mb[1]; 1900 *entries = mcp->mb[1];
1899 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 1901 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
1900 ha->host_no);) 1902 ha->host_no));
1901 } 1903 }
1902 1904
1903 return rval; 1905 return rval;
@@ -1936,7 +1938,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1936 if (rval != QLA_SUCCESS) { 1938 if (rval != QLA_SUCCESS) {
1937 /*EMPTY*/ 1939 /*EMPTY*/
1938 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 1940 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
1939 ha->host_no, mcp->mb[0]);) 1941 ha->host_no, mcp->mb[0]));
1940 } else { 1942 } else {
1941 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 1943 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
1942 "mb7=%x mb10=%x.\n", __func__, ha->host_no, 1944 "mb7=%x mb10=%x.\n", __func__, ha->host_no,
@@ -2045,7 +2047,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2045 link_stat_t *stat_buf; 2047 link_stat_t *stat_buf;
2046 dma_addr_t stat_buf_dma; 2048 dma_addr_t stat_buf_dma;
2047 2049
2048 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2050 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2049 2051
2050 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma); 2052 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
2051 if (stat_buf == NULL) { 2053 if (stat_buf == NULL) {
@@ -2083,7 +2085,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2083 if (rval == QLA_SUCCESS) { 2085 if (rval == QLA_SUCCESS) {
2084 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2086 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2085 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2087 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2086 __func__, ha->host_no, mcp->mb[0]);) 2088 __func__, ha->host_no, mcp->mb[0]));
2087 status[0] = mcp->mb[0]; 2089 status[0] = mcp->mb[0];
2088 rval = BIT_1; 2090 rval = BIT_1;
2089 } else { 2091 } else {
@@ -2108,12 +2110,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2108 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt, 2110 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
2109 stat_buf->prim_seq_err_cnt, 2111 stat_buf->prim_seq_err_cnt,
2110 stat_buf->inval_xmit_word_cnt, 2112 stat_buf->inval_xmit_word_cnt,
2111 stat_buf->inval_crc_cnt);) 2113 stat_buf->inval_crc_cnt));
2112 } 2114 }
2113 } else { 2115 } else {
2114 /* Failed. */ 2116 /* Failed. */
2115 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2117 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2116 ha->host_no, rval);) 2118 ha->host_no, rval));
2117 rval = BIT_1; 2119 rval = BIT_1;
2118 } 2120 }
2119 2121
@@ -2132,7 +2134,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2132 uint32_t *sbuf, *siter; 2134 uint32_t *sbuf, *siter;
2133 dma_addr_t sbuf_dma; 2135 dma_addr_t sbuf_dma;
2134 2136
2135 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2137 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2136 2138
2137 if (dwords > (DMA_POOL_SIZE / 4)) { 2139 if (dwords > (DMA_POOL_SIZE / 4)) {
2138 DEBUG2_3_11(printk("%s(%ld): Unable to retrieve %d DWORDs " 2140 DEBUG2_3_11(printk("%s(%ld): Unable to retrieve %d DWORDs "
@@ -2196,7 +2198,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2196 dma_addr_t abt_dma; 2198 dma_addr_t abt_dma;
2197 uint32_t handle; 2199 uint32_t handle;
2198 2200
2199 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2201 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2200 2202
2201 fcport = sp->fcport; 2203 fcport = sp->fcport;
2202 2204
@@ -2229,7 +2231,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2229 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0); 2231 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
2230 if (rval != QLA_SUCCESS) { 2232 if (rval != QLA_SUCCESS) {
2231 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2233 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
2232 __func__, ha->host_no, rval);) 2234 __func__, ha->host_no, rval));
2233 } else if (abt->entry_status != 0) { 2235 } else if (abt->entry_status != 0) {
2234 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2236 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2235 "-- error status (%x).\n", __func__, ha->host_no, 2237 "-- error status (%x).\n", __func__, ha->host_no,
@@ -2238,10 +2240,10 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2238 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2240 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2239 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2241 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2240 "-- completion status (%x).\n", __func__, ha->host_no, 2242 "-- completion status (%x).\n", __func__, ha->host_no,
2241 le16_to_cpu(abt->nport_handle));) 2243 le16_to_cpu(abt->nport_handle)));
2242 rval = QLA_FUNCTION_FAILED; 2244 rval = QLA_FUNCTION_FAILED;
2243 } else { 2245 } else {
2244 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 2246 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2245 sp->flags |= SRB_ABORT_PENDING; 2247 sp->flags |= SRB_ABORT_PENDING;
2246 } 2248 }
2247 2249
@@ -2268,7 +2270,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2268 if (fcport == NULL) 2270 if (fcport == NULL)
2269 return 0; 2271 return 0;
2270 2272
2271 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no);) 2273 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
2272 2274
2273 ha = fcport->ha; 2275 ha = fcport->ha;
2274 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2276 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
@@ -2290,7 +2292,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2290 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); 2292 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
2291 if (rval != QLA_SUCCESS) { 2293 if (rval != QLA_SUCCESS) {
2292 DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB " 2294 DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB "
2293 "(%x).\n", __func__, ha->host_no, rval);) 2295 "(%x).\n", __func__, ha->host_no, rval));
2294 goto atarget_done; 2296 goto atarget_done;
2295 } else if (tsk->p.sts.entry_status != 0) { 2297 } else if (tsk->p.sts.entry_status != 0) {
2296 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2298 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
@@ -2302,7 +2304,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2302 __constant_cpu_to_le16(CS_COMPLETE)) { 2304 __constant_cpu_to_le16(CS_COMPLETE)) {
2303 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2305 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2304 "-- completion status (%x).\n", __func__, 2306 "-- completion status (%x).\n", __func__,
2305 ha->host_no, le16_to_cpu(tsk->p.sts.comp_status));) 2307 ha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
2306 rval = QLA_FUNCTION_FAILED; 2308 rval = QLA_FUNCTION_FAILED;
2307 goto atarget_done; 2309 goto atarget_done;
2308 } 2310 }
@@ -2311,9 +2313,9 @@ qla24xx_abort_target(fc_port_t *fcport)
2311 rval = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); 2313 rval = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID);
2312 if (rval != QLA_SUCCESS) { 2314 if (rval != QLA_SUCCESS) {
2313 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2315 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
2314 "(%x).\n", __func__, ha->host_no, rval);) 2316 "(%x).\n", __func__, ha->host_no, rval));
2315 } else { 2317 } else {
2316 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 2318 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2317 } 2319 }
2318 2320
2319atarget_done: 2321atarget_done:
@@ -2460,3 +2462,81 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
2460 2462
2461 return rval; 2463 return rval;
2462} 2464}
2465
2466int
2467qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
2468 uint16_t buffers)
2469{
2470 int rval;
2471 mbx_cmd_t mc;
2472 mbx_cmd_t *mcp = &mc;
2473
2474 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
2475 return QLA_FUNCTION_FAILED;
2476
2477 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2478
2479 mcp->mb[0] = MBC_TRACE_CONTROL;
2480 mcp->mb[1] = ctrl;
2481 mcp->out_mb = MBX_1|MBX_0;
2482 mcp->in_mb = MBX_1|MBX_0;
2483 if (ctrl == TC_ENABLE) {
2484 mcp->mb[2] = LSW(eft_dma);
2485 mcp->mb[3] = MSW(eft_dma);
2486 mcp->mb[4] = LSW(MSD(eft_dma));
2487 mcp->mb[5] = MSW(MSD(eft_dma));
2488 mcp->mb[6] = buffers;
2489 mcp->mb[7] = buffers;
2490 mcp->out_mb |= MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2;
2491 }
2492 mcp->tov = 30;
2493 mcp->flags = 0;
2494 rval = qla2x00_mailbox_command(ha, mcp);
2495
2496 if (rval != QLA_SUCCESS) {
2497 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2498 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2499 } else {
2500 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2501 }
2502
2503 return rval;
2504}
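qla2x00_trace_control() only programs the firmware; the caller supplies the DMA-able trace buffer. A hedged usage sketch (the 64K size, single-buffer count, and error handling are illustrative assumptions, not part of this patch):

	static int example_enable_fw_trace(scsi_qla_host_t *ha)
	{
		dma_addr_t eft_dma;
		void *eft;
		int rval;

		/* One 64K extended-firmware-trace buffer (size is illustrative). */
		eft = dma_alloc_coherent(&ha->pdev->dev, 0x10000, &eft_dma,
		    GFP_KERNEL);
		if (!eft)
			return QLA_FUNCTION_FAILED;

		rval = qla2x00_trace_control(ha, TC_ENABLE, eft_dma, 1);
		if (rval != QLA_SUCCESS)
			dma_free_coherent(&ha->pdev->dev, 0x10000, eft, eft_dma);
		return rval;
	}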
2505
2506int
2507qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2508 uint16_t off, uint16_t count)
2509{
2510 int rval;
2511 mbx_cmd_t mc;
2512 mbx_cmd_t *mcp = &mc;
2513
2514 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
2515 return QLA_FUNCTION_FAILED;
2516
2517 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2518
2519 mcp->mb[0] = MBC_READ_SFP;
2520 mcp->mb[1] = addr;
2521 mcp->mb[2] = MSW(sfp_dma);
2522 mcp->mb[3] = LSW(sfp_dma);
2523 mcp->mb[6] = MSW(MSD(sfp_dma));
2524 mcp->mb[7] = LSW(MSD(sfp_dma));
2525 mcp->mb[8] = count;
2526 mcp->mb[9] = off;
2527 mcp->mb[10] = 0;
2528 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2529 mcp->in_mb = MBX_0;
2530 mcp->tov = 30;
2531 mcp->flags = 0;
2532 rval = qla2x00_mailbox_command(ha, mcp);
2533
2534 if (rval != QLA_SUCCESS) {
2535 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2536 ha->host_no, rval, mcp->mb[0]));
2537 } else {
2538 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2539 }
2540
2541 return rval;
2542}
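A companion sketch for qla2x00_read_sfp(): fetch the first 64 bytes of the transceiver's A0h page over the two-wire interface (the 0xA0 device address and 64-byte length follow SFF-8472 convention and are assumptions here, as is the use of the driver's s_dma_pool):

	static int example_read_sfp_page(scsi_qla_host_t *ha, uint8_t *out)
	{
		dma_addr_t sfp_dma;
		uint8_t *sfp;
		int rval;

		sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
		if (sfp == NULL)
			return QLA_FUNCTION_FAILED;

		rval = qla2x00_read_sfp(ha, sfp_dma, 0xA0, 0, 64);
		if (rval == QLA_SUCCESS)
			memcpy(out, sfp, 64);	/* hand data back to the caller */

		dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
		return rval;
	}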
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ccaad0b08d35..65cbe2f5eea2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -39,14 +39,14 @@ MODULE_PARM_DESC(ql2xlogintimeout,
39int qlport_down_retry = 30; 39int qlport_down_retry = 30;
40module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 40module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
41MODULE_PARM_DESC(qlport_down_retry, 41MODULE_PARM_DESC(qlport_down_retry,
42 "Maximum number of command retries to a port that returns" 42 "Maximum number of command retries to a port that returns "
43 "a PORT-DOWN status."); 43 "a PORT-DOWN status.");
44 44
45int ql2xplogiabsentdevice; 45int ql2xplogiabsentdevice;
46module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); 46module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
47MODULE_PARM_DESC(ql2xplogiabsentdevice, 47MODULE_PARM_DESC(ql2xplogiabsentdevice,
48 "Option to enable PLOGI to devices that are not present after " 48 "Option to enable PLOGI to devices that are not present after "
49 "a Fabric scan. This is needed for several broken switches." 49 "a Fabric scan. This is needed for several broken switches. "
50 "Default is 0 - no PLOGI. 1 - perfom PLOGI."); 50 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
51 51
52int ql2xloginretrycount = 0; 52int ql2xloginretrycount = 0;
@@ -54,6 +54,19 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
54MODULE_PARM_DESC(ql2xloginretrycount, 54MODULE_PARM_DESC(ql2xloginretrycount,
55 "Specify an alternate value for the NVRAM login retry count."); 55 "Specify an alternate value for the NVRAM login retry count.");
56 56
57int ql2xallocfwdump = 1;
58module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
59MODULE_PARM_DESC(ql2xallocfwdump,
60 "Option to enable allocation of memory for a firmware dump "
61 "during HBA initialization. Memory allocation requirements "
62 "vary by ISP type. Default is 1 - allocate memory.");
63
64int extended_error_logging;
65module_param(extended_error_logging, int, S_IRUGO|S_IRUSR);
66MODULE_PARM_DESC(extended_error_logging,
67 "Option to enable extended error logging, "
68 "Default is 0 - no logging. 1 - log errors.");
69
57static void qla2x00_free_device(scsi_qla_host_t *); 70static void qla2x00_free_device(scsi_qla_host_t *);
58 71
59static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 72static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
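With extended_error_logging exposed as a module parameter, the driver's plain DEBUG() wrapper can be switched at load time instead of compile time. The gating pattern this assumes (the actual macro sits in qla_dbg.h):

	extern int extended_error_logging;

	#define DEBUG(x)	do { if (extended_error_logging) { x; } } while (0)

Loading the module with "modprobe qla2xxx extended_error_logging=1" then lets the DEBUG() call sites in the hunks above emit their printk() output.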
@@ -624,7 +637,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
624 637
625 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 638 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n",
626 __func__, ha->host_no, sp, serial)); 639 __func__, ha->host_no, sp, serial));
627 DEBUG3(qla2x00_print_scsi_cmd(cmd);) 640 DEBUG3(qla2x00_print_scsi_cmd(cmd));
628 641
629 spin_unlock_irqrestore(&ha->hardware_lock, flags); 642 spin_unlock_irqrestore(&ha->hardware_lock, flags);
630 if (ha->isp_ops.abort_command(ha, sp)) { 643 if (ha->isp_ops.abort_command(ha, sp)) {
@@ -731,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
731{ 744{
732 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
733 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
734 srb_t *sp;
735 int ret; 747 int ret;
736 unsigned int id, lun; 748 unsigned int id, lun;
737 unsigned long serial; 749 unsigned long serial;
@@ -742,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
742 lun = cmd->device->lun; 754 lun = cmd->device->lun;
743 serial = cmd->serial_number; 755 serial = cmd->serial_number;
744 756
745 sp = (srb_t *) CMD_SP(cmd); 757 if (!fcport)
746 if (!sp || !fcport)
747 return ret; 758 return ret;
748 759
749 qla_printk(KERN_INFO, ha, 760 qla_printk(KERN_INFO, ha,
@@ -766,7 +777,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
766#endif 777#endif
767 } else { 778 } else {
768 DEBUG2(printk(KERN_INFO 779 DEBUG2(printk(KERN_INFO
769 "%s failed: loop not ready\n",__func__);) 780 "%s failed: loop not ready\n",__func__));
770 } 781 }
771 782
772 if (ret == FAILED) { 783 if (ret == FAILED) {
@@ -862,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
862{ 873{
863 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 874 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
864 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 875 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
865 srb_t *sp;
866 int ret; 876 int ret;
867 unsigned int id, lun; 877 unsigned int id, lun;
868 unsigned long serial; 878 unsigned long serial;
@@ -873,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
873 lun = cmd->device->lun; 883 lun = cmd->device->lun;
874 serial = cmd->serial_number; 884 serial = cmd->serial_number;
875 885
876 sp = (srb_t *) CMD_SP(cmd); 886 if (!fcport)
877 if (!sp || !fcport)
878 return ret; 887 return ret;
879 888
880 qla_printk(KERN_INFO, ha, 889 qla_printk(KERN_INFO, ha,
@@ -923,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
923{ 932{
924 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 933 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
925 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 934 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
926 srb_t *sp;
927 int ret; 935 int ret;
928 unsigned int id, lun; 936 unsigned int id, lun;
929 unsigned long serial; 937 unsigned long serial;
@@ -934,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
934 lun = cmd->device->lun; 942 lun = cmd->device->lun;
935 serial = cmd->serial_number; 943 serial = cmd->serial_number;
936 944
937 sp = (srb_t *) CMD_SP(cmd); 945 if (!fcport)
938 if (!sp || !fcport)
939 return ret; 946 return ret;
940 947
941 qla_printk(KERN_INFO, ha, 948 qla_printk(KERN_INFO, ha,
@@ -1021,12 +1028,12 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1021 /* Empty */ 1028 /* Empty */
1022 DEBUG2_3(printk("%s(%ld): **** FAILED ****\n", 1029 DEBUG2_3(printk("%s(%ld): **** FAILED ****\n",
1023 __func__, 1030 __func__,
1024 ha->host_no);) 1031 ha->host_no));
1025 } else { 1032 } else {
1026 /* Empty */ 1033 /* Empty */
1027 DEBUG3(printk("%s(%ld): exiting normally.\n", 1034 DEBUG3(printk("%s(%ld): exiting normally.\n",
1028 __func__, 1035 __func__,
1029 ha->host_no);) 1036 ha->host_no));
1030 } 1037 }
1031 1038
1032 return(status); 1039 return(status);
@@ -1324,7 +1331,8 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
1324/* 1331/*
1325 * PCI driver interface 1332 * PCI driver interface
1326 */ 1333 */
1327static int qla2x00_probe_one(struct pci_dev *pdev) 1334static int __devinit
1335qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1328{ 1336{
1329 int ret = -ENODEV; 1337 int ret = -ENODEV;
1330 device_reg_t __iomem *reg; 1338 device_reg_t __iomem *reg;
@@ -1405,7 +1413,6 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1405 ha->isp_ops.read_nvram = qla2x00_read_nvram_data; 1413 ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
1406 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1414 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1407 ha->isp_ops.fw_dump = qla2100_fw_dump; 1415 ha->isp_ops.fw_dump = qla2100_fw_dump;
1408 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
1409 ha->isp_ops.read_optrom = qla2x00_read_optrom_data; 1416 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1410 ha->isp_ops.write_optrom = qla2x00_write_optrom_data; 1417 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1411 if (IS_QLA2100(ha)) { 1418 if (IS_QLA2100(ha)) {
@@ -1432,7 +1439,6 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1432 ha->isp_ops.pci_config = qla2300_pci_config; 1439 ha->isp_ops.pci_config = qla2300_pci_config;
1433 ha->isp_ops.intr_handler = qla2300_intr_handler; 1440 ha->isp_ops.intr_handler = qla2300_intr_handler;
1434 ha->isp_ops.fw_dump = qla2300_fw_dump; 1441 ha->isp_ops.fw_dump = qla2300_fw_dump;
1435 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
1436 ha->isp_ops.beacon_on = qla2x00_beacon_on; 1442 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1437 ha->isp_ops.beacon_off = qla2x00_beacon_off; 1443 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1438 ha->isp_ops.beacon_blink = qla2x00_beacon_blink; 1444 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
@@ -1469,7 +1475,6 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1469 ha->isp_ops.read_nvram = qla24xx_read_nvram_data; 1475 ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
1470 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1476 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1471 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1477 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1472 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
1473 ha->isp_ops.read_optrom = qla24xx_read_optrom_data; 1478 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1474 ha->isp_ops.write_optrom = qla24xx_write_optrom_data; 1479 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1475 ha->isp_ops.beacon_on = qla24xx_beacon_on; 1480 ha->isp_ops.beacon_on = qla24xx_beacon_on;
@@ -1541,7 +1546,7 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1541 host->transportt = qla2xxx_transport_template; 1546 host->transportt = qla2xxx_transport_template;
1542 1547
1543 ret = request_irq(pdev->irq, ha->isp_ops.intr_handler, 1548 ret = request_irq(pdev->irq, ha->isp_ops.intr_handler,
1544 SA_INTERRUPT|SA_SHIRQ, QLA2XXX_DRIVER_NAME, ha); 1549 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1545 if (ret) { 1550 if (ret) {
1546 qla_printk(KERN_WARNING, ha, 1551 qla_printk(KERN_WARNING, ha,
1547 "Failed to reserve interrupt %d already in use.\n", 1552 "Failed to reserve interrupt %d already in use.\n",
@@ -1640,7 +1645,8 @@ probe_out:
1640 return ret; 1645 return ret;
1641} 1646}
1642 1647
1643static void qla2x00_remove_one(struct pci_dev *pdev) 1648static void __devexit
1649qla2x00_remove_one(struct pci_dev *pdev)
1644{ 1650{
1645 scsi_qla_host_t *ha; 1651 scsi_qla_host_t *ha;
1646 1652
@@ -1678,6 +1684,9 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1678 kthread_stop(t); 1684 kthread_stop(t);
1679 } 1685 }
1680 1686
1687 if (ha->eft)
1688 qla2x00_trace_control(ha, TC_DISABLE, 0, 0);
1689
1681 /* Stop currently executing firmware. */ 1690 /* Stop currently executing firmware. */
1682 qla2x00_stop_firmware(ha); 1691 qla2x00_stop_firmware(ha);
1683 1692
@@ -1899,17 +1908,6 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
1899 } 1908 }
1900 memset(ha->init_cb, 0, ha->init_cb_size); 1909 memset(ha->init_cb, 0, ha->init_cb_size);
1901 1910
1902 /* Allocate ioctl related memory. */
1903 if (qla2x00_alloc_ioctl_mem(ha)) {
1904 qla_printk(KERN_WARNING, ha,
1905 "Memory Allocation failed - ioctl_mem\n");
1906
1907 qla2x00_mem_free(ha);
1908 msleep(100);
1909
1910 continue;
1911 }
1912
1913 if (qla2x00_allocate_sp_pool(ha)) { 1911 if (qla2x00_allocate_sp_pool(ha)) {
1914 qla_printk(KERN_WARNING, ha, 1912 qla_printk(KERN_WARNING, ha,
1915 "Memory Allocation failed - " 1913 "Memory Allocation failed - "
@@ -1972,6 +1970,26 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
1972 continue; 1970 continue;
1973 } 1971 }
1974 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt)); 1972 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
1973
1974 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1975 /*
1976 * Get consistent memory allocated for SFP
1977 * block.
1978 */
1979 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool,
1980 GFP_KERNEL, &ha->sfp_data_dma);
1981 if (ha->sfp_data == NULL) {
1982 qla_printk(KERN_WARNING, ha,
1983 "Memory Allocation failed - "
1984 "sfp_data\n");
1985
1986 qla2x00_mem_free(ha);
1987 msleep(100);
1988
1989 continue;
1990 }
1991 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
1992 }
1975 } 1993 }
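/* The SFP block comes from the driver's s_dma_pool: dma_pool_alloc()
 * returns the CPU pointer (ha->sfp_data) and fills in the bus address
 * (ha->sfp_data_dma); both must be handed back together to
 * dma_pool_free(), which qla2x00_mem_free() now does below.
 */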
1976 1994
1977 /* Done all allocations without any error. */ 1995 /* Done all allocations without any error. */
@@ -2006,12 +2024,16 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2006 return; 2024 return;
2007 } 2025 }
2008 2026
2009 /* free ioctl memory */
2010 qla2x00_free_ioctl_mem(ha);
2011
2012 /* free sp pool */ 2027 /* free sp pool */
2013 qla2x00_free_sp_pool(ha); 2028 qla2x00_free_sp_pool(ha);
2014 2029
2030 if (ha->fw_dump) {
2031 if (ha->eft)
2032 dma_free_coherent(&ha->pdev->dev,
2033 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2034 vfree(ha->fw_dump);
2035 }
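/* Ownership is split here: the dump header itself was vmalloc()ed, so
 * it goes back with vfree(), while the EFT buffer it references is
 * DMA-coherent memory whose size travels big-endian inside the header
 * (hence the ntohl() on eft_size).
 */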
2036
2015 if (ha->sns_cmd) 2037 if (ha->sns_cmd)
2016 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2038 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2017 ha->sns_cmd, ha->sns_cmd_dma); 2039 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2020,6 +2042,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2020 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2042 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2021 ha->ct_sns, ha->ct_sns_dma); 2043 ha->ct_sns, ha->ct_sns_dma);
2022 2044
2045 if (ha->sfp_data)
2046 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2047
2023 if (ha->ms_iocb) 2048 if (ha->ms_iocb)
2024 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2049 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2025 2050
@@ -2043,6 +2068,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2043 (ha->request_q_length + 1) * sizeof(request_t), 2068 (ha->request_q_length + 1) * sizeof(request_t),
2044 ha->request_ring, ha->request_dma); 2069 ha->request_ring, ha->request_dma);
2045 2070
2071 ha->eft = NULL;
2072 ha->eft_dma = 0;
2046 ha->sns_cmd = NULL; 2073 ha->sns_cmd = NULL;
2047 ha->sns_cmd_dma = 0; 2074 ha->sns_cmd_dma = 0;
2048 ha->ct_sns = NULL; 2075 ha->ct_sns = NULL;
@@ -2071,13 +2098,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2071 } 2098 }
2072 INIT_LIST_HEAD(&ha->fcports); 2099 INIT_LIST_HEAD(&ha->fcports);
2073 2100
2074 vfree(ha->fw_dump);
2075 vfree(ha->fw_dump_buffer);
2076
2077 ha->fw_dump = NULL; 2101 ha->fw_dump = NULL;
2078 ha->fw_dumped = 0; 2102 ha->fw_dumped = 0;
2079 ha->fw_dump_reading = 0; 2103 ha->fw_dump_reading = 0;
2080 ha->fw_dump_buffer = NULL;
2081 2104
2082 vfree(ha->optrom_buffer); 2105 vfree(ha->optrom_buffer);
2083} 2106}
@@ -2215,9 +2238,6 @@ qla2x00_do_dpc(void *data)
2215 2238
2216 next_loopid = 0; 2239 next_loopid = 0;
2217 list_for_each_entry(fcport, &ha->fcports, list) { 2240 list_for_each_entry(fcport, &ha->fcports, list) {
2218 if (fcport->port_type != FCT_TARGET)
2219 continue;
2220
2221 /* 2241 /*
2222 * If the port is not ONLINE then try to login 2242 * If the port is not ONLINE then try to login
2223 * to it if we haven't run out of retries. 2243 * to it if we haven't run out of retries.
@@ -2617,40 +2637,16 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
2617}; 2637};
2618MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 2638MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
2619 2639
2620static int __devinit
2621qla2xxx_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2622{
2623 return qla2x00_probe_one(pdev);
2624}
2625
2626static void __devexit
2627qla2xxx_remove_one(struct pci_dev *pdev)
2628{
2629 qla2x00_remove_one(pdev);
2630}
2631
2632static struct pci_driver qla2xxx_pci_driver = { 2640static struct pci_driver qla2xxx_pci_driver = {
2633 .name = QLA2XXX_DRIVER_NAME, 2641 .name = QLA2XXX_DRIVER_NAME,
2634 .driver = { 2642 .driver = {
2635 .owner = THIS_MODULE, 2643 .owner = THIS_MODULE,
2636 }, 2644 },
2637 .id_table = qla2xxx_pci_tbl, 2645 .id_table = qla2xxx_pci_tbl,
2638 .probe = qla2xxx_probe_one, 2646 .probe = qla2x00_probe_one,
2639 .remove = __devexit_p(qla2xxx_remove_one), 2647 .remove = __devexit_p(qla2x00_remove_one),
2640}; 2648};
2641 2649
2642static inline int
2643qla2x00_pci_module_init(void)
2644{
2645 return pci_module_init(&qla2xxx_pci_driver);
2646}
2647
2648static inline void
2649qla2x00_pci_module_exit(void)
2650{
2651 pci_unregister_driver(&qla2xxx_pci_driver);
2652}
2653
2654/** 2650/**
2655 * qla2x00_module_init - Module initialization. 2651 * qla2x00_module_init - Module initialization.
2656 **/ 2652 **/
@@ -2670,16 +2666,16 @@ qla2x00_module_init(void)
2670 2666
2671 /* Derive version string. */ 2667 /* Derive version string. */
2672 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 2668 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
2673#if DEBUG_QLA2100 2669 if (extended_error_logging)
2674 strcat(qla2x00_version_str, "-debug"); 2670 strcat(qla2x00_version_str, "-debug");
2675#endif 2671
2676 qla2xxx_transport_template = 2672 qla2xxx_transport_template =
2677 fc_attach_transport(&qla2xxx_transport_functions); 2673 fc_attach_transport(&qla2xxx_transport_functions);
2678 if (!qla2xxx_transport_template) 2674 if (!qla2xxx_transport_template)
2679 return -ENODEV; 2675 return -ENODEV;
2680 2676
2681 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n"); 2677 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
2682 ret = qla2x00_pci_module_init(); 2678 ret = pci_register_driver(&qla2xxx_pci_driver);
2683 if (ret) { 2679 if (ret) {
2684 kmem_cache_destroy(srb_cachep); 2680 kmem_cache_destroy(srb_cachep);
2685 fc_release_transport(qla2xxx_transport_template); 2681 fc_release_transport(qla2xxx_transport_template);
@@ -2693,7 +2689,7 @@ qla2x00_module_init(void)
2693static void __exit 2689static void __exit
2694qla2x00_module_exit(void) 2690qla2x00_module_exit(void)
2695{ 2691{
2696 qla2x00_pci_module_exit(); 2692 pci_unregister_driver(&qla2xxx_pci_driver);
2697 qla2x00_release_firmware(); 2693 qla2x00_release_firmware();
2698 kmem_cache_destroy(srb_cachep); 2694 kmem_cache_destroy(srb_cachep);
2699 fc_release_transport(qla2xxx_transport_template); 2695 fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6b315521bd89..971259032ef7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.05-k2" 10#define QLA2XXX_VERSION "8.01.07-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
14#define QLA_DRIVER_PATCH_VER 5 14#define QLA_DRIVER_PATCH_VER 7
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 2203103adced..5b2f0741a55b 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1,6 +1,6 @@
1/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. 1/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2 * 2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu) 3 * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net)
4 * 4 *
5 * A lot of this driver was directly stolen from Erik H. Moe's PCI 5 * A lot of this driver was directly stolen from Erik H. Moe's PCI
6 * Qlogic ISP driver. Mucho kudos to him for this code. 6 * Qlogic ISP driver. Mucho kudos to him for this code.
@@ -46,8 +46,6 @@
46#include <scsi/scsi_tcq.h> 46#include <scsi/scsi_tcq.h>
47#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
48 48
49
50
51#define MAX_TARGETS 16 49#define MAX_TARGETS 16
52#define MAX_LUNS 8 /* 32 for 1.31 F/W */ 50#define MAX_LUNS 8 /* 32 for 1.31 F/W */
53 51
@@ -57,7 +55,6 @@
57 55
58static struct qlogicpti *qptichain = NULL; 56static struct qlogicpti *qptichain = NULL;
59static DEFINE_SPINLOCK(qptichain_lock); 57static DEFINE_SPINLOCK(qptichain_lock);
60static int qptis_running = 0;
61 58
62#define PACKB(a, b) (((a)<<4)|(b)) 59#define PACKB(a, b) (((a)<<4)|(b))
63 60
@@ -721,7 +718,7 @@ static int __init qpti_register_irq(struct qlogicpti *qpti)
721 * sanely maintain. 718 * sanely maintain.
722 */ 719 */
723 if (request_irq(qpti->irq, qpti_intr, 720 if (request_irq(qpti->irq, qpti_intr,
724 SA_SHIRQ, "Qlogic/PTI", qpti)) 721 IRQF_SHARED, "Qlogic/PTI", qpti))
725 goto fail; 722 goto fail;
726 723
727 printk("qpti%d: IRQ %d ", qpti->qpti_id, qpti->irq); 724 printk("qpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
@@ -815,173 +812,6 @@ static int __init qpti_map_queues(struct qlogicpti *qpti)
815 return 0; 812 return 0;
816} 813}
817 814
818/* Detect all PTI Qlogic ISP's in the machine. */
819static int __init qlogicpti_detect(struct scsi_host_template *tpnt)
820{
821 struct qlogicpti *qpti;
822 struct Scsi_Host *qpti_host;
823 struct sbus_bus *sbus;
824 struct sbus_dev *sdev;
825 int nqptis = 0, nqptis_in_use = 0;
826
827 tpnt->proc_name = "qlogicpti";
828 for_each_sbus(sbus) {
829 for_each_sbusdev(sdev, sbus) {
830 /* Is this a red snapper? */
831 if (strcmp(sdev->prom_name, "ptisp") &&
832 strcmp(sdev->prom_name, "PTI,ptisp") &&
833 strcmp(sdev->prom_name, "QLGC,isp") &&
834 strcmp(sdev->prom_name, "SUNW,isp"))
835 continue;
836
837 /* Sometimes Antares cards come up not completely
838 * setup, and we get a report of a zero IRQ.
839 * Skip over them in such cases so we survive.
840 */
841 if (sdev->irqs[0] == 0) {
842 printk("qpti%d: Adapter reports no interrupt, "
843 "skipping over this card.", nqptis);
844 continue;
845 }
846
847 /* Yep, register and allocate software state. */
848 qpti_host = scsi_register(tpnt, sizeof(struct qlogicpti));
849 if (!qpti_host) {
850 printk("QPTI: Cannot register PTI Qlogic ISP SCSI host");
851 continue;
852 }
853 qpti = (struct qlogicpti *) qpti_host->hostdata;
854
855 /* We are wide capable, 16 targets. */
856 qpti_host->max_id = MAX_TARGETS;
857
858 /* Setup back pointers and misc. state. */
859 qpti->qhost = qpti_host;
860 qpti->sdev = sdev;
861 qpti->qpti_id = nqptis++;
862 qpti->prom_node = sdev->prom_node;
863 prom_getstring(qpti->prom_node, "name",
864 qpti->prom_name,
865 sizeof(qpti->prom_name));
866
867 /* This is not correct, actually. There's a switch
868 * on the PTI cards that puts them into "emulation"
869 * mode- i.e., report themselves as QLGC,isp
870 * instead of PTI,ptisp. The only real substantive
871 * difference between non-pti and pti cards is
872 * the tmon register. Which is possibly even
873 * there for Qlogic cards, but non-functional.
874 */
875 qpti->is_pti = (strcmp (qpti->prom_name, "QLGC,isp") != 0);
876
877 qpti_chain_add(qpti);
878 if (qpti_map_regs(qpti) < 0)
879 goto fail_unlink;
880
881 if (qpti_register_irq(qpti) < 0)
882 goto fail_unmap_regs;
883
884 qpti_get_scsi_id(qpti);
885 qpti_get_bursts(qpti);
886 qpti_get_clock(qpti);
887
888 /* Clear out scsi_cmnd array. */
889 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
890
891 if (qpti_map_queues(qpti) < 0)
892 goto fail_free_irq;
893
894 /* Load the firmware. */
895 if (qlogicpti_load_firmware(qpti))
896 goto fail_unmap_queues;
897 if (qpti->is_pti) {
898 /* Check the PTI status reg. */
899 if (qlogicpti_verify_tmon(qpti))
900 goto fail_unmap_queues;
901 }
902
903 /* Reset the ISP and init res/req queues. */
904 if (qlogicpti_reset_hardware(qpti_host))
905 goto fail_unmap_queues;
906
907 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
908 qpti->fware_minrev, qpti->fware_micrev);
909 {
910 char buffer[60];
911
912 prom_getstring (qpti->prom_node,
913 "isp-fcode", buffer, 60);
914 if (buffer[0])
915 printk("(Firmware %s)", buffer);
916 if (prom_getbool(qpti->prom_node, "differential"))
917 qpti->differential = 1;
918 }
919
920 printk (" [%s Wide, using %s interface]\n",
921 (qpti->ultra ? "Ultra" : "Fast"),
922 (qpti->differential ? "differential" : "single ended"));
923
924 nqptis_in_use++;
925 continue;
926
927 fail_unmap_queues:
928#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
929 sbus_free_consistent(qpti->sdev,
930 QSIZE(RES_QUEUE_LEN),
931 qpti->res_cpu, qpti->res_dvma);
932 sbus_free_consistent(qpti->sdev,
933 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
934 qpti->req_cpu, qpti->req_dvma);
935#undef QSIZE
936 fail_free_irq:
937 free_irq(qpti->irq, qpti);
938
939 fail_unmap_regs:
940 sbus_iounmap(qpti->qregs,
941 qpti->sdev->reg_addrs[0].reg_size);
942 if (qpti->is_pti)
943 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
944 fail_unlink:
945 qpti_chain_del(qpti);
946 scsi_unregister(qpti->qhost);
947 }
948 }
949 if (nqptis)
950 printk("QPTI: Total of %d PTI Qlogic/ISP hosts found, %d actually in use.\n",
951 nqptis, nqptis_in_use);
952 qptis_running = nqptis_in_use;
953 return nqptis;
954}
955
956static int qlogicpti_release(struct Scsi_Host *host)
957{
958 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
959
960 /* Remove visibility from IRQ handlers. */
961 qpti_chain_del(qpti);
962
963 /* Shut up the card. */
964 sbus_writew(0, qpti->qregs + SBUS_CTRL);
965
966 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
967 free_irq(qpti->irq, qpti);
968
969#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
970 sbus_free_consistent(qpti->sdev,
971 QSIZE(RES_QUEUE_LEN),
972 qpti->res_cpu, qpti->res_dvma);
973 sbus_free_consistent(qpti->sdev,
974 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
975 qpti->req_cpu, qpti->req_dvma);
976#undef QSIZE
977
978 sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
979 if (qpti->is_pti)
980 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
981
982 return 0;
983}
984
985const char *qlogicpti_info(struct Scsi_Host *host) 815const char *qlogicpti_info(struct Scsi_Host *host)
986{ 816{
987 static char buf[80]; 817 static char buf[80];
@@ -1044,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
1044 if (Cmnd->use_sg) { 874 if (Cmnd->use_sg) {
1045 int sg_count; 875 int sg_count;
1046 876
1047 sg = (struct scatterlist *) Cmnd->buffer; 877 sg = (struct scatterlist *) Cmnd->request_buffer;
1048 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); 878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
1049 879
1050 ds = cmd->dataseg; 880 ds = cmd->dataseg;
@@ -1448,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1448 1278
1449 if (Cmnd->use_sg) { 1279 if (Cmnd->use_sg) {
1450 sbus_unmap_sg(qpti->sdev, 1280 sbus_unmap_sg(qpti->sdev,
1451 (struct scatterlist *)Cmnd->buffer, 1281 (struct scatterlist *)Cmnd->request_buffer,
1452 Cmnd->use_sg, 1282 Cmnd->use_sg,
1453 Cmnd->sc_data_direction); 1283 Cmnd->sc_data_direction);
1454 } else { 1284 } else {
@@ -1551,9 +1381,9 @@ static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
1551 return return_status; 1381 return return_status;
1552} 1382}
1553 1383
1554static struct scsi_host_template driver_template = { 1384static struct scsi_host_template qpti_template = {
1555 .detect = qlogicpti_detect, 1385 .module = THIS_MODULE,
1556 .release = qlogicpti_release, 1386 .name = "qlogicpti",
1557 .info = qlogicpti_info, 1387 .info = qlogicpti_info,
1558 .queuecommand = qlogicpti_queuecommand_slow, 1388 .queuecommand = qlogicpti_queuecommand_slow,
1559 .eh_abort_handler = qlogicpti_abort, 1389 .eh_abort_handler = qlogicpti_abort,
@@ -1565,8 +1395,189 @@ static struct scsi_host_template driver_template = {
1565 .use_clustering = ENABLE_CLUSTERING, 1395 .use_clustering = ENABLE_CLUSTERING,
1566}; 1396};
1567 1397
1398static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_device_id *match)
1399{
1400 static int nqptis;
1401 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1402 struct device_node *dp = dev->node;
1403 struct scsi_host_template *tpnt = match->data;
1404 struct Scsi_Host *host;
1405 struct qlogicpti *qpti;
1406 char *fcode;
1407
1408 /* Sometimes Antares cards come up not completely
1409 * setup, and we get a report of a zero IRQ.
1410 */
1411 if (sdev->irqs[0] == 0)
1412 return -ENODEV;
1413
1414 host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
1415 if (!host)
1416 return -ENOMEM;
1417
1418 qpti = (struct qlogicpti *) host->hostdata;
1419
1420 host->max_id = MAX_TARGETS;
1421 qpti->qhost = host;
1422 qpti->sdev = sdev;
1423 qpti->qpti_id = nqptis;
1424 qpti->prom_node = sdev->prom_node;
1425 strcpy(qpti->prom_name, sdev->ofdev.node->name);
1426 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
1427
1428 if (qpti_map_regs(qpti) < 0)
1429 goto fail_unlink;
1430
1431 if (qpti_register_irq(qpti) < 0)
1432 goto fail_unmap_regs;
1433
1434 qpti_get_scsi_id(qpti);
1435 qpti_get_bursts(qpti);
1436 qpti_get_clock(qpti);
1437
1438 /* Clear out scsi_cmnd array. */
1439 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
1440
1441 if (qpti_map_queues(qpti) < 0)
1442 goto fail_free_irq;
1443
1444 /* Load the firmware. */
1445 if (qlogicpti_load_firmware(qpti))
1446 goto fail_unmap_queues;
1447 if (qpti->is_pti) {
1448 /* Check the PTI status reg. */
1449 if (qlogicpti_verify_tmon(qpti))
1450 goto fail_unmap_queues;
1451 }
1452
1453 /* Reset the ISP and init res/req queues. */
1454 if (qlogicpti_reset_hardware(host))
1455 goto fail_unmap_queues;
1456
1457 if (scsi_add_host(host, &dev->dev))
1458 goto fail_unmap_queues;
1459
1460 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
1461 qpti->fware_minrev, qpti->fware_micrev);
1462
1463 fcode = of_get_property(dp, "isp-fcode", NULL);
1464 if (fcode && fcode[0])
1465 printk("(Firmware %s)", fcode);
1466 if (of_find_property(dp, "differential", NULL) != NULL)
1467 qpti->differential = 1;
1468
1469 printk (" [%s Wide, using %s interface]\n",
1470 (qpti->ultra ? "Ultra" : "Fast"),
1471 (qpti->differential ? "differential" : "single ended"));
1472
1473 dev_set_drvdata(&sdev->ofdev.dev, qpti);
1474
1475 qpti_chain_add(qpti);
1476
1477 scsi_scan_host(host);
1478 nqptis++;
1479
1480 return 0;
1481
1482fail_unmap_queues:
1483#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1484 sbus_free_consistent(qpti->sdev,
1485 QSIZE(RES_QUEUE_LEN),
1486 qpti->res_cpu, qpti->res_dvma);
1487 sbus_free_consistent(qpti->sdev,
1488 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1489 qpti->req_cpu, qpti->req_dvma);
1490#undef QSIZE
1491
1492fail_unmap_regs:
1493 sbus_iounmap(qpti->qregs,
1494 qpti->sdev->reg_addrs[0].reg_size);
1495 if (qpti->is_pti)
1496 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
1497
1498fail_free_irq:
1499 free_irq(qpti->irq, qpti);
1500
1501fail_unlink:
1502 scsi_host_put(host);
1503
1504 return -ENODEV;
1505}
1506
1507static int __devexit qpti_sbus_remove(struct of_device *dev)
1508{
1509 struct qlogicpti *qpti = dev_get_drvdata(&dev->dev);
1510
1511 qpti_chain_del(qpti);
1512
1513 scsi_remove_host(qpti->qhost);
1514
1515 /* Shut up the card. */
1516 sbus_writew(0, qpti->qregs + SBUS_CTRL);
1517
1518 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
1519 free_irq(qpti->irq, qpti);
1520
1521#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1522 sbus_free_consistent(qpti->sdev,
1523 QSIZE(RES_QUEUE_LEN),
1524 qpti->res_cpu, qpti->res_dvma);
1525 sbus_free_consistent(qpti->sdev,
1526 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1527 qpti->req_cpu, qpti->req_dvma);
1528#undef QSIZE
1529
1530 sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
1531 if (qpti->is_pti)
1532 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
1533
1534 scsi_host_put(qpti->qhost);
1535
1536 return 0;
1537}
1538
1539static struct of_device_id qpti_match[] = {
1540 {
1541 .name = "ptisp",
1542 .data = &qpti_template,
1543 },
1544 {
1545 .name = "PTI,ptisp",
1546 .data = &qpti_template,
1547 },
1548 {
1549 .name = "QLGC,isp",
1550 .data = &qpti_template,
1551 },
1552 {
1553 .name = "SUNW,isp",
1554 .data = &qpti_template,
1555 },
1556 {},
1557};
1558MODULE_DEVICE_TABLE(of, qpti_match);
1559
1560static struct of_platform_driver qpti_sbus_driver = {
1561 .name = "qpti",
1562 .match_table = qpti_match,
1563 .probe = qpti_sbus_probe,
1564 .remove = __devexit_p(qpti_sbus_remove),
1565};
1568 1566
1569#include "scsi_module.c" 1567static int __init qpti_init(void)
1568{
1569 return of_register_driver(&qpti_sbus_driver, &sbus_bus_type);
1570}
1571
1572static void __exit qpti_exit(void)
1573{
1574 of_unregister_driver(&qpti_sbus_driver);
1575}
1570 1576
1577MODULE_DESCRIPTION("QlogicISP SBUS driver");
1578MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
1571MODULE_LICENSE("GPL"); 1579MODULE_LICENSE("GPL");
1580MODULE_VERSION("2.0");
1572 1581
1582module_init(qpti_init);
1583module_exit(qpti_exit);
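The conversion above drops the monolithic scsi_module.c detect/release
scheme in favor of a per-device probe/remove driver bound through the
OpenFirmware device tree: each node matching qpti_match gets its own
qpti_sbus_probe() call, and unload tears hosts down one at a time. A
minimal skeleton of the same registration pattern (the foo_* names and
"FOO,dev" match string are placeholders):

/* Skeleton of the of_platform_driver pattern used above (sketch). */
static int __devinit foo_probe(struct of_device *dev,
			       const struct of_device_id *match)
{
	/* allocate, map, and register exactly one device here */
	return 0;
}

static int __devexit foo_remove(struct of_device *dev)
{
	/* tear down exactly the device probed above */
	return 0;
}

static struct of_device_id foo_match[] = {
	{ .name = "FOO,dev" },	/* placeholder match string */
	{},
};

static struct of_platform_driver foo_driver = {
	.name		= "foo",
	.match_table	= foo_match,
	.probe		= foo_probe,
	.remove		= __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
	return of_register_driver(&foo_driver, &sbus_bus_type);
}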
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
deleted file mode 100644
index 4a71578df3c1..000000000000
--- a/drivers/scsi/sata_mv.c
+++ /dev/null
@@ -1,2468 +0,0 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/dma-mapping.h>
33#include <linux/device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_cmnd.h>
36#include <linux/libata.h>
37#include <asm/io.h>
38
39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.7"
41
42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */
44 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
45 MV_IO_BAR = 2, /* offset 0x18: IO space */
46 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
47
48 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
49 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
50
51 MV_PCI_REG_BASE = 0,
52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
53 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
54 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
55 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
56 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
57 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
58
59 MV_SATAHC0_REG_BASE = 0x20000,
60 MV_FLASH_CTL = 0x1046c,
61 MV_GPIO_PORT_CTL = 0x104f0,
62 MV_RESET_CFG = 0x180d8,
63
64 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
65 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
66 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
67 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
68
69 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
70
71 MV_MAX_Q_DEPTH = 32,
72 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
73
74 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
75 * CRPB needs alignment on a 256B boundary. Size == 256B
76 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
77 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
78 */
79 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
80 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
81 MV_MAX_SG_CT = 176,
82 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
83 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
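	/* Worked out: MV_CRQB_Q_SZ = 32 * 32 = 1024B, MV_CRPB_Q_SZ =
	 * 8 * 32 = 256B, MV_SG_TBL_SZ = 16 * 176 = 2816B, so
	 * MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096B, the 4KB
	 * promised above.
	 */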
84
85 MV_PORTS_PER_HC = 4,
86 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
87 MV_PORT_HC_SHIFT = 2,
88 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
89 MV_PORT_MASK = 3,
90
91 /* Host Flags */
92 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
109 /* PCI interface registers */
110
111 PCI_COMMAND_OFS = 0xc00,
112
113 PCI_MAIN_CMD_STS_OFS = 0xd30,
114 STOP_PCI_MASTER = (1 << 2),
115 PCI_MASTER_EMPTY = (1 << 3),
116 GLOB_SFT_RST = (1 << 4),
117
118 MV_PCI_MODE = 0xd00,
119 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
120 MV_PCI_DISC_TIMER = 0xd04,
121 MV_PCI_MSI_TRIGGER = 0xc38,
122 MV_PCI_SERR_MASK = 0xc28,
123 MV_PCI_XBAR_TMOUT = 0x1d04,
124 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
125 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
126 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
127 MV_PCI_ERR_COMMAND = 0x1d50,
128
129 PCI_IRQ_CAUSE_OFS = 0x1d58,
130 PCI_IRQ_MASK_OFS = 0x1d5c,
131 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
132
133 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
134 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
135 PORT0_ERR = (1 << 0), /* shift by port # */
136 PORT0_DONE = (1 << 1), /* shift by port # */
137 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
138 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
139 PCI_ERR = (1 << 18),
140 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
141 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
142 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
143 GPIO_INT = (1 << 22),
144 SELF_INT = (1 << 23),
145 TWSI_INT = (1 << 24),
146 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
147 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
148 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
149 HC_MAIN_RSVD),
150
151 /* SATAHC registers */
152 HC_CFG_OFS = 0,
153
154 HC_IRQ_CAUSE_OFS = 0x14,
155 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
156 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
157 DEV_IRQ = (1 << 8), /* shift by port # */
158
159 /* Shadow block registers */
160 SHD_BLK_OFS = 0x100,
161 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
162
163 /* SATA registers */
164 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
165 SATA_ACTIVE_OFS = 0x350,
166 PHY_MODE3 = 0x310,
167 PHY_MODE4 = 0x314,
168 PHY_MODE2 = 0x330,
169 MV5_PHY_MODE = 0x74,
170 MV5_LT_MODE = 0x30,
171 MV5_PHY_CTL = 0x0C,
172 SATA_INTERFACE_CTL = 0x050,
173
174 MV_M2_PREAMP_MASK = 0x7e0,
175
176 /* Port registers */
177 EDMA_CFG_OFS = 0,
178 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
179 EDMA_CFG_NCQ = (1 << 5),
180 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
181 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
182 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
183
184 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
185 EDMA_ERR_IRQ_MASK_OFS = 0xc,
186 EDMA_ERR_D_PAR = (1 << 0),
187 EDMA_ERR_PRD_PAR = (1 << 1),
188 EDMA_ERR_DEV = (1 << 2),
189 EDMA_ERR_DEV_DCON = (1 << 3),
190 EDMA_ERR_DEV_CON = (1 << 4),
191 EDMA_ERR_SERR = (1 << 5),
192 EDMA_ERR_SELF_DIS = (1 << 7),
193 EDMA_ERR_BIST_ASYNC = (1 << 8),
194 EDMA_ERR_CRBQ_PAR = (1 << 9),
195 EDMA_ERR_CRPB_PAR = (1 << 10),
196 EDMA_ERR_INTRL_PAR = (1 << 11),
197 EDMA_ERR_IORDY = (1 << 12),
198 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
199 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
200 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
201 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
202 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
203 EDMA_ERR_TRANS_PROTO = (1 << 31),
204 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
205 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
206 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
207 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
208 EDMA_ERR_LNK_DATA_RX |
209 EDMA_ERR_LNK_DATA_TX |
210 EDMA_ERR_TRANS_PROTO),
211
212 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
213 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
214
215 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
216 EDMA_REQ_Q_PTR_SHIFT = 5,
217
218 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
219 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
220 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
221 EDMA_RSP_Q_PTR_SHIFT = 3,
222
223 EDMA_CMD_OFS = 0x28,
224 EDMA_EN = (1 << 0),
225 EDMA_DS = (1 << 1),
226 ATA_RST = (1 << 2),
227
228 EDMA_IORDY_TMOUT = 0x34,
229 EDMA_ARB_CFG = 0x38,
230
231 /* Host private flags (hp_flags) */
232 MV_HP_FLAG_MSI = (1 << 0),
233 MV_HP_ERRATA_50XXB0 = (1 << 1),
234 MV_HP_ERRATA_50XXB2 = (1 << 2),
235 MV_HP_ERRATA_60X1B2 = (1 << 3),
236 MV_HP_ERRATA_60X1C0 = (1 << 4),
237 MV_HP_ERRATA_XX42A0 = (1 << 5),
238 MV_HP_50XX = (1 << 6),
239 MV_HP_GEN_IIE = (1 << 7),
240
241 /* Port private flags (pp_flags) */
242 MV_PP_FLAG_EDMA_EN = (1 << 0),
243 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
244};
245
246#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
247#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
248#define IS_GEN_I(hpriv) IS_50XX(hpriv)
249#define IS_GEN_II(hpriv) IS_60XX(hpriv)
250#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
251
252enum {
253 /* Our DMA boundary is determined by an ePRD being unable to handle
254 * anything larger than 64KB
255 */
256 MV_DMA_BOUNDARY = 0xffffU,
257
258 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
259
260 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
261};
262
263enum chip_type {
264 chip_504x,
265 chip_508x,
266 chip_5080,
267 chip_604x,
268 chip_608x,
269 chip_6042,
270 chip_7042,
271};
272
273/* Command ReQuest Block: 32B */
274struct mv_crqb {
275 __le32 sg_addr;
276 __le32 sg_addr_hi;
277 __le16 ctrl_flags;
278 __le16 ata_cmd[11];
279};
280
281struct mv_crqb_iie {
282 __le32 addr;
283 __le32 addr_hi;
284 __le32 flags;
285 __le32 len;
286 __le32 ata_cmd[4];
287};
288
289/* Command ResPonse Block: 8B */
290struct mv_crpb {
291 __le16 id;
292 __le16 flags;
293 __le32 tmstmp;
294};
295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg {
298 __le32 addr;
299 __le32 flags_size;
300 __le32 addr_hi;
301 __le32 reserved;
302};
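/* Compile-time checks of the sizes claimed in the comments above
 * (standalone sketch, not part of the driver; assumes the usual 2- and
 * 4-byte __le16/__le32 with no inserted padding):
 */
typedef char mv_crqb_is_32b[sizeof(struct mv_crqb) == 32 ? 1 : -1];
typedef char mv_crpb_is_8b[sizeof(struct mv_crpb) == 8 ? 1 : -1];
typedef char mv_sg_is_16b[sizeof(struct mv_sg) == 16 ? 1 : -1];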
303
304struct mv_port_priv {
305 struct mv_crqb *crqb;
306 dma_addr_t crqb_dma;
307 struct mv_crpb *crpb;
308 dma_addr_t crpb_dma;
309 struct mv_sg *sg_tbl;
310 dma_addr_t sg_tbl_dma;
311 u32 pp_flags;
312};
313
314struct mv_port_signal {
315 u32 amps;
316 u32 pre;
317};
318
319struct mv_host_priv;
320struct mv_hw_ops {
321 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
322 unsigned int port);
323 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
324 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
325 void __iomem *mmio);
326 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
327 unsigned int n_hc);
328 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
329 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
330};
331
332struct mv_host_priv {
333 u32 hp_flags;
334 struct mv_port_signal signal[8];
335 const struct mv_hw_ops *ops;
336};
337
338static void mv_irq_clear(struct ata_port *ap);
339static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
340static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
341static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
343static void mv_phy_reset(struct ata_port *ap);
344static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
345static void mv_host_stop(struct ata_host_set *host_set);
346static int mv_port_start(struct ata_port *ap);
347static void mv_port_stop(struct ata_port *ap);
348static void mv_qc_prep(struct ata_queued_cmd *qc);
349static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
350static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
351static irqreturn_t mv_interrupt(int irq, void *dev_instance,
352 struct pt_regs *regs);
353static void mv_eng_timeout(struct ata_port *ap);
354static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
355
356static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
357 unsigned int port);
358static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
359static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
360 void __iomem *mmio);
361static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
362 unsigned int n_hc);
363static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
364static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
365
366static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
367 unsigned int port);
368static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
369static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
370 void __iomem *mmio);
371static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
372 unsigned int n_hc);
373static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
374static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
375static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
376 unsigned int port_no);
377static void mv_stop_and_reset(struct ata_port *ap);
378
379static struct scsi_host_template mv_sht = {
380 .module = THIS_MODULE,
381 .name = DRV_NAME,
382 .ioctl = ata_scsi_ioctl,
383 .queuecommand = ata_scsi_queuecmd,
384 .can_queue = MV_USE_Q_DEPTH,
385 .this_id = ATA_SHT_THIS_ID,
386 .sg_tablesize = MV_MAX_SG_CT / 2,
387 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
388 .emulated = ATA_SHT_EMULATED,
389 .use_clustering = ATA_SHT_USE_CLUSTERING,
390 .proc_name = DRV_NAME,
391 .dma_boundary = MV_DMA_BOUNDARY,
392 .slave_configure = ata_scsi_slave_config,
393 .slave_destroy = ata_scsi_slave_destroy,
394 .bios_param = ata_std_bios_param,
395};
396
397static const struct ata_port_operations mv5_ops = {
398 .port_disable = ata_port_disable,
399
400 .tf_load = ata_tf_load,
401 .tf_read = ata_tf_read,
402 .check_status = ata_check_status,
403 .exec_command = ata_exec_command,
404 .dev_select = ata_std_dev_select,
405
406 .phy_reset = mv_phy_reset,
407
408 .qc_prep = mv_qc_prep,
409 .qc_issue = mv_qc_issue,
410 .data_xfer = ata_mmio_data_xfer,
411
412 .eng_timeout = mv_eng_timeout,
413
414 .irq_handler = mv_interrupt,
415 .irq_clear = mv_irq_clear,
416
417 .scr_read = mv5_scr_read,
418 .scr_write = mv5_scr_write,
419
420 .port_start = mv_port_start,
421 .port_stop = mv_port_stop,
422 .host_stop = mv_host_stop,
423};
424
425static const struct ata_port_operations mv6_ops = {
426 .port_disable = ata_port_disable,
427
428 .tf_load = ata_tf_load,
429 .tf_read = ata_tf_read,
430 .check_status = ata_check_status,
431 .exec_command = ata_exec_command,
432 .dev_select = ata_std_dev_select,
433
434 .phy_reset = mv_phy_reset,
435
436 .qc_prep = mv_qc_prep,
437 .qc_issue = mv_qc_issue,
438 .data_xfer = ata_mmio_data_xfer,
439
440 .eng_timeout = mv_eng_timeout,
441
442 .irq_handler = mv_interrupt,
443 .irq_clear = mv_irq_clear,
444
445 .scr_read = mv_scr_read,
446 .scr_write = mv_scr_write,
447
448 .port_start = mv_port_start,
449 .port_stop = mv_port_stop,
450 .host_stop = mv_host_stop,
451};
452
453static const struct ata_port_operations mv_iie_ops = {
454 .port_disable = ata_port_disable,
455
456 .tf_load = ata_tf_load,
457 .tf_read = ata_tf_read,
458 .check_status = ata_check_status,
459 .exec_command = ata_exec_command,
460 .dev_select = ata_std_dev_select,
461
462 .phy_reset = mv_phy_reset,
463
464 .qc_prep = mv_qc_prep_iie,
465 .qc_issue = mv_qc_issue,
466
467 .eng_timeout = mv_eng_timeout,
468
469 .irq_handler = mv_interrupt,
470 .irq_clear = mv_irq_clear,
471
472 .scr_read = mv_scr_read,
473 .scr_write = mv_scr_write,
474
475 .port_start = mv_port_start,
476 .port_stop = mv_port_stop,
477 .host_stop = mv_host_stop,
478};
479
480static const struct ata_port_info mv_port_info[] = {
481 { /* chip_504x */
482 .sht = &mv_sht,
483 .host_flags = MV_COMMON_FLAGS,
484 .pio_mask = 0x1f, /* pio0-4 */
485 .udma_mask = 0x7f, /* udma0-6 */
486 .port_ops = &mv5_ops,
487 },
488 { /* chip_508x */
489 .sht = &mv_sht,
490 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
491 .pio_mask = 0x1f, /* pio0-4 */
492 .udma_mask = 0x7f, /* udma0-6 */
493 .port_ops = &mv5_ops,
494 },
495 { /* chip_5080 */
496 .sht = &mv_sht,
497 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
498 .pio_mask = 0x1f, /* pio0-4 */
499 .udma_mask = 0x7f, /* udma0-6 */
500 .port_ops = &mv5_ops,
501 },
502 { /* chip_604x */
503 .sht = &mv_sht,
504 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
505 .pio_mask = 0x1f, /* pio0-4 */
506 .udma_mask = 0x7f, /* udma0-6 */
507 .port_ops = &mv6_ops,
508 },
509 { /* chip_608x */
510 .sht = &mv_sht,
511 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
512 MV_FLAG_DUAL_HC),
513 .pio_mask = 0x1f, /* pio0-4 */
514 .udma_mask = 0x7f, /* udma0-6 */
515 .port_ops = &mv6_ops,
516 },
517 { /* chip_6042 */
518 .sht = &mv_sht,
519 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
520 .pio_mask = 0x1f, /* pio0-4 */
521 .udma_mask = 0x7f, /* udma0-6 */
522 .port_ops = &mv_iie_ops,
523 },
524 { /* chip_7042 */
525 .sht = &mv_sht,
526 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
527 MV_FLAG_DUAL_HC),
528 .pio_mask = 0x1f, /* pio0-4 */
529 .udma_mask = 0x7f, /* udma0-6 */
530 .port_ops = &mv_iie_ops,
531 },
532};
533
534static const struct pci_device_id mv_pci_tbl[] = {
535 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
539
540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
541 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
542 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
543 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
544 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
545
546 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
547 {} /* terminate list */
548};
549
550static struct pci_driver mv_pci_driver = {
551 .name = DRV_NAME,
552 .id_table = mv_pci_tbl,
553 .probe = mv_init_one,
554 .remove = ata_pci_remove_one,
555};
556
557static const struct mv_hw_ops mv5xxx_ops = {
558 .phy_errata = mv5_phy_errata,
559 .enable_leds = mv5_enable_leds,
560 .read_preamp = mv5_read_preamp,
561 .reset_hc = mv5_reset_hc,
562 .reset_flash = mv5_reset_flash,
563 .reset_bus = mv5_reset_bus,
564};
565
566static const struct mv_hw_ops mv6xxx_ops = {
567 .phy_errata = mv6_phy_errata,
568 .enable_leds = mv6_enable_leds,
569 .read_preamp = mv6_read_preamp,
570 .reset_hc = mv6_reset_hc,
571 .reset_flash = mv6_reset_flash,
572 .reset_bus = mv_reset_pci_bus,
573};
574
575/*
576 * module options
577 */
578static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
579
580
581/*
582 * Functions
583 */
584
585static inline void writelfl(unsigned long data, void __iomem *addr)
586{
587 writel(data, addr);
588 (void) readl(addr); /* flush to avoid PCI posted write */
589}
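/* writelfl() forces the write out of any PCI posted-write buffers by
 * reading the register straight back; the driver uses it wherever the
 * write must reach the chip before the CPU proceeds (queue pointers,
 * EDMA enable), and plain writel() where ordering does not matter.
 * Typical use, as in mv_start_dma() below:
 *
 *	writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 */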
590
591static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
592{
593 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
594}
595
596static inline unsigned int mv_hc_from_port(unsigned int port)
597{
598 return port >> MV_PORT_HC_SHIFT;
599}
600
601static inline unsigned int mv_hardport_from_port(unsigned int port)
602{
603 return port & MV_PORT_MASK;
604}
605
606static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
607 unsigned int port)
608{
609 return mv_hc_base(base, mv_hc_from_port(port));
610}
611
612static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
613{
614 return mv_hc_base_from_port(base, port) +
615 MV_SATAHC_ARBTR_REG_SZ +
616 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
617}
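/* Worked example: global port 5 -> HC 5 >> 2 == 1, hard port 5 & 3 == 1,
 * so mv_port_base() resolves to
 *	base + 0x20000 (MV_SATAHC0_REG_BASE)
 *	     + 1 * 0x10000 (MV_SATAHC_REG_SZ)
 *	     + 0x2000 (MV_SATAHC_ARBTR_REG_SZ)
 *	     + 1 * 0x2000 (MV_PORT_REG_SZ)
 *	= base + 0x34000.
 */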
618
619static inline void __iomem *mv_ap_base(struct ata_port *ap)
620{
621 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
622}
623
624static inline int mv_get_hc_count(unsigned long host_flags)
625{
626 return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627}
628
629static void mv_irq_clear(struct ata_port *ap)
630{
631}
632
633/**
634 * mv_start_dma - Enable eDMA engine
635 * @base: port base address
636 * @pp: port private data
637 *
638 * Verify the local cache of the eDMA state is accurate with a
639 * WARN_ON.
640 *
641 * LOCKING:
642 * Inherited from caller.
643 */
644static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
645{
646 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
647 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
648 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
649 }
650 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
651}
652
653/**
654 * mv_stop_dma - Disable eDMA engine
655 * @ap: ATA channel to manipulate
656 *
657 * Verify the local cache of the eDMA state is accurate with a
658 * WARN_ON.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663static void mv_stop_dma(struct ata_port *ap)
664{
665 void __iomem *port_mmio = mv_ap_base(ap);
666 struct mv_port_priv *pp = ap->private_data;
667 u32 reg;
668 int i;
669
670 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
671 /* Disable EDMA if active. The disable bit auto clears.
672 */
673 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
674 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
675 } else {
676 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
677 }
678
679 /* now properly wait for the eDMA to stop */
680 for (i = 1000; i > 0; i--) {
681 reg = readl(port_mmio + EDMA_CMD_OFS);
682 if (!(EDMA_EN & reg)) {
683 break;
684 }
685 udelay(100);
686 }
687
688 if (EDMA_EN & reg) {
689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
690 /* FIXME: Consider doing a reset here to recover */
691 }
692}
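/* The stop path above gives EDMA_EN up to 1000 * 100us = 100ms to
 * clear. The same bounded-poll shape, extracted as a reusable sketch
 * (not part of the driver):
 */
static int mv_wait_bit_clear(void __iomem *reg, u32 bit)
{
	int i;

	for (i = 1000; i > 0; i--) {
		if (!(readl(reg) & bit))
			return 0;	/* cleared in time */
		udelay(100);
	}
	return -EBUSY;			/* ~100ms elapsed, still set */
}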
693
694#ifdef ATA_DEBUG
695static void mv_dump_mem(void __iomem *start, unsigned bytes)
696{
697 int b, w;
698 for (b = 0; b < bytes; ) {
699 DPRINTK("%p: ", start + b);
700 for (w = 0; b < bytes && w < 4; w++) {
701 printk("%08x ",readl(start + b));
702 b += sizeof(u32);
703 }
704 printk("\n");
705 }
706}
707#endif
708
709static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
710{
711#ifdef ATA_DEBUG
712 int b, w;
713 u32 dw;
714 for (b = 0; b < bytes; ) {
715 DPRINTK("%02x: ", b);
716 for (w = 0; b < bytes && w < 4; w++) {
717 (void) pci_read_config_dword(pdev,b,&dw);
718 printk("%08x ",dw);
719 b += sizeof(u32);
720 }
721 printk("\n");
722 }
723#endif
724}
725static void mv_dump_all_regs(void __iomem *mmio_base, int port,
726 struct pci_dev *pdev)
727{
728#ifdef ATA_DEBUG
729 void __iomem *hc_base = mv_hc_base(mmio_base,
730 port >> MV_PORT_HC_SHIFT);
731 void __iomem *port_base;
732 int start_port, num_ports, p, start_hc, num_hcs, hc;
733
734 if (0 > port) {
735 start_hc = start_port = 0;
736 num_ports = 8; /* should be benign for 4-port devices */
737 num_hcs = 2;
738 } else {
739 start_hc = port >> MV_PORT_HC_SHIFT;
740 start_port = port;
741 num_ports = num_hcs = 1;
742 }
743 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
744 num_ports > 1 ? num_ports - 1 : start_port);
745
746 if (NULL != pdev) {
747 DPRINTK("PCI config space regs:\n");
748 mv_dump_pci_cfg(pdev, 0x68);
749 }
750 DPRINTK("PCI regs:\n");
751 mv_dump_mem(mmio_base+0xc00, 0x3c);
752 mv_dump_mem(mmio_base+0xd00, 0x34);
753 mv_dump_mem(mmio_base+0xf00, 0x4);
754 mv_dump_mem(mmio_base+0x1d00, 0x6c);
755 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
756 hc_base = mv_hc_base(mmio_base, hc);
757 DPRINTK("HC regs (HC %i):\n", hc);
758 mv_dump_mem(hc_base, 0x1c);
759 }
760 for (p = start_port; p < start_port + num_ports; p++) {
761 port_base = mv_port_base(mmio_base, p);
762 DPRINTK("EDMA regs (port %i):\n",p);
763 mv_dump_mem(port_base, 0x54);
764 DPRINTK("SATA regs (port %i):\n",p);
765 mv_dump_mem(port_base+0x300, 0x60);
766 }
767#endif
768}
769
770static unsigned int mv_scr_offset(unsigned int sc_reg_in)
771{
772 unsigned int ofs;
773
774 switch (sc_reg_in) {
775 case SCR_STATUS:
776 case SCR_CONTROL:
777 case SCR_ERROR:
778 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
779 break;
780 case SCR_ACTIVE:
781 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
782 break;
783 default:
784 ofs = 0xffffffffU;
785 break;
786 }
787 return ofs;
788}
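/* With libata's SCR indices (SCR_STATUS 0, SCR_ERROR 1, SCR_CONTROL 2),
 * the arithmetic above yields SStatus at 0x300, SError at 0x304 and
 * SControl at 0x308 -- matching the "ctrl, err regs follow status" note
 * at SATA_STATUS_OFS -- while SCR_ACTIVE alone is special-cased to
 * 0x350.
 */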
789
790static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
791{
792 unsigned int ofs = mv_scr_offset(sc_reg_in);
793
794 if (0xffffffffU != ofs) {
795 return readl(mv_ap_base(ap) + ofs);
796 } else {
797 return (u32) ofs;
798 }
799}
800
801static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
802{
803 unsigned int ofs = mv_scr_offset(sc_reg_in);
804
805 if (0xffffffffU != ofs) {
806 writelfl(val, mv_ap_base(ap) + ofs);
807 }
808}
809
810/**
811 * mv_host_stop - Host specific cleanup/stop routine.
812 * @host_set: host data structure
813 *
814 * Disable ints, cleanup host memory, call general purpose
815 * host_stop.
816 *
817 * LOCKING:
818 * Inherited from caller.
819 */
820static void mv_host_stop(struct ata_host_set *host_set)
821{
822 struct mv_host_priv *hpriv = host_set->private_data;
823 struct pci_dev *pdev = to_pci_dev(host_set->dev);
824
825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826 pci_disable_msi(pdev);
827 } else {
828 pci_intx(pdev, 0);
829 }
830 kfree(hpriv);
831 ata_host_stop(host_set);
832}
833
834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
835{
836 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
837}
838
839static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
840{
841 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
842
843 /* set up non-NCQ EDMA configuration */
844 cfg &= ~0x1f; /* clear queue depth */
845 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
846 cfg &= ~(1 << 9); /* disable equeue */
847
848 if (IS_GEN_I(hpriv))
849 cfg |= (1 << 8); /* enab config burst size mask */
850
851 else if (IS_GEN_II(hpriv))
852 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
853
854 else if (IS_GEN_IIE(hpriv)) {
855 cfg |= (1 << 23); /* dis RX PM port mask */
856 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
857 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
858 cfg |= (1 << 18); /* enab early completion */
859 cfg |= (1 << 17); /* enab host q cache */
860 cfg |= (1 << 22); /* enab cutthrough */
861 }
862
863 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
864}
865
866/**
867 * mv_port_start - Port specific init/start routine.
868 * @ap: ATA channel to manipulate
869 *
870 * Allocate and point to DMA memory, init port private memory,
871 * zero indices.
872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
876static int mv_port_start(struct ata_port *ap)
877{
878 struct device *dev = ap->host_set->dev;
879 struct mv_host_priv *hpriv = ap->host_set->private_data;
880 struct mv_port_priv *pp;
881 void __iomem *port_mmio = mv_ap_base(ap);
882 void *mem;
883 dma_addr_t mem_dma;
884 int rc = -ENOMEM;
885
886 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
887 if (!pp)
888 goto err_out;
889 memset(pp, 0, sizeof(*pp));
890
891 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
892 GFP_KERNEL);
893 if (!mem)
894 goto err_out_pp;
895 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
896
897 rc = ata_pad_alloc(ap, dev);
898 if (rc)
899 goto err_out_priv;
900
901 /* First item in chunk of DMA memory:
902 * 32-slot command request table (CRQB), 32 bytes each in size
903 */
904 pp->crqb = mem;
905 pp->crqb_dma = mem_dma;
906 mem += MV_CRQB_Q_SZ;
907 mem_dma += MV_CRQB_Q_SZ;
908
909 /* Second item:
910 * 32-slot command response table (CRPB), 8 bytes each in size
911 */
912 pp->crpb = mem;
913 pp->crpb_dma = mem_dma;
914 mem += MV_CRPB_Q_SZ;
915 mem_dma += MV_CRPB_Q_SZ;
916
917 /* Third item:
918 * Table of scatter-gather descriptors (ePRD), 16 bytes each
919 */
920 pp->sg_tbl = mem;
921 pp->sg_tbl_dma = mem_dma;
922
923 mv_edma_cfg(hpriv, port_mmio);
924
925 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
926 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
927 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
928
929 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
930 writelfl(pp->crqb_dma & 0xffffffff,
931 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
932 else
933 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
934
935 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
936
937 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
938 writelfl(pp->crpb_dma & 0xffffffff,
939 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
940 else
941 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
942
943 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
944 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
945
946 /* Don't turn on EDMA here...do it before DMA commands only. Else
947 * we'll be unable to send non-data, PIO, etc due to restricted access
948 * to shadow regs.
949 */
950 ap->private_data = pp;
951 return 0;
952
953err_out_priv:
954 mv_priv_free(pp, dev);
955err_out_pp:
956 kfree(pp);
957err_out:
958 return rc;
959}
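/* Net layout of the single 4KB coherent chunk carved up above:
 *	0x000	crqb	32 entries * 32B = 1024B
 *	0x400	crpb	32 entries *  8B =  256B
 *	0x500	sg_tbl	176 ePRDs  * 16B = 2816B
 *	0x1000	end
 * CPU pointer and DMA address advance in lockstep, so each pp->*_dma
 * stays the bus-side mirror of its pp->* pointer.
 */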
960
961/**
962 * mv_port_stop - Port specific cleanup/stop routine.
963 * @ap: ATA channel to manipulate
964 *
965 * Stop DMA, cleanup port memory.
966 *
967 * LOCKING:
968 * This routine uses the host_set lock to protect the DMA stop.
969 */
970static void mv_port_stop(struct ata_port *ap)
971{
972 struct device *dev = ap->host_set->dev;
973 struct mv_port_priv *pp = ap->private_data;
974 unsigned long flags;
975
976 spin_lock_irqsave(&ap->host_set->lock, flags);
977 mv_stop_dma(ap);
978 spin_unlock_irqrestore(&ap->host_set->lock, flags);
979
980 ap->private_data = NULL;
981 ata_pad_free(ap, dev);
982 mv_priv_free(pp, dev);
983 kfree(pp);
984}
985
986/**
987 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
988 * @qc: queued command whose SG list to source from
989 *
990 * Populate the SG list and mark the last entry.
991 *
992 * LOCKING:
993 * Inherited from caller.
994 */
995static void mv_fill_sg(struct ata_queued_cmd *qc)
996{
997 struct mv_port_priv *pp = qc->ap->private_data;
998 unsigned int i = 0;
999 struct scatterlist *sg;
1000
1001 ata_for_each_sg(sg, qc) {
1002 dma_addr_t addr;
1003 u32 sg_len, len, offset;
1004
1005 addr = sg_dma_address(sg);
1006 sg_len = sg_dma_len(sg);
1007
1008 while (sg_len) {
1009 offset = addr & MV_DMA_BOUNDARY;
1010 len = sg_len;
1011 if ((offset + sg_len) > 0x10000)
1012 len = 0x10000 - offset;
1013
1014 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
1015 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1016 pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
1017
1018 sg_len -= len;
1019 addr += len;
1020
1021 if (!sg_len && ata_sg_is_last(sg, qc))
1022 pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1023
1024 i++;
1025 }
1026 }
1027}
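/* Worked example of the 64KB split: a 0x400-byte segment at bus address
 * 0x1ff00 has offset 0xff00 within its 64KB window, so the first ePRD
 * covers 0x10000 - 0xff00 = 0x100 bytes, and a second ePRD picks up the
 * remaining 0x300 bytes at 0x20000. Only that second ePRD gets
 * EPRD_FLAG_END_OF_TBL (assuming it ends the command's last segment).
 */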
1028
1029static inline unsigned mv_inc_q_index(unsigned index)
1030{
1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1032}
1033
1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1035{
1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1037 (last ? CRQB_CMD_LAST : 0);
1038 *cmdw = cpu_to_le16(tmp);
1039}
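/* mv_inc_q_index() wraps with the power-of-two mask: (31 + 1) & 31 == 0.
 * mv_crqb_pack_cmd() packs one shadow-register write per 16-bit word:
 * data in bits 0-7, register address in bits 8-10, the CS strobe at
 * bit 12 (0x2 << 11) and the last-entry marker at bit 15. Packing
 * "ATA_CMD_READ (0xc8) to ATA_REG_CMD (7), last" gives
 *	0xc8 | (7 << 8) | (0x2 << 11) | (1 << 15) == 0x97c8.
 */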
1040
1041/**
1042 * mv_qc_prep - Host specific command preparation.
1043 * @qc: queued command to prepare
1044 *
1045 * This routine simply redirects to the general purpose routine
1046 * if command is not DMA. Else, it handles prep of the CRQB
1047 * (command request block), does some sanity checking, and calls
1048 * the SG load routine.
1049 *
1050 * LOCKING:
1051 * Inherited from caller.
1052 */
1053static void mv_qc_prep(struct ata_queued_cmd *qc)
1054{
1055 struct ata_port *ap = qc->ap;
1056 struct mv_port_priv *pp = ap->private_data;
1057 __le16 *cw;
1058 struct ata_taskfile *tf;
1059 u16 flags = 0;
1060 unsigned in_index;
1061
1062 if (ATA_PROT_DMA != qc->tf.protocol)
1063 return;
1064
1065 /* Fill in command request block
1066 */
1067 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1068 flags |= CRQB_FLAG_READ;
1069 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1070 flags |= qc->tag << CRQB_TAG_SHIFT;
1071
1072 /* get current queue index from hardware */
1073 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1074 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1075
1076 pp->crqb[in_index].sg_addr =
1077 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1078 pp->crqb[in_index].sg_addr_hi =
1079 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1080 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1081
1082 cw = &pp->crqb[in_index].ata_cmd[0];
1083 tf = &qc->tf;
1084
 1085	/* Sadly, the CRQB cannot accommodate all registers--there are
 1086	 * only 11 command words...so we must pick and choose the required
 1087	 * registers based on the command.  So, we drop feature and
 1088	 * hob_feature for [RW] DMA commands, but they are needed for
 1089	 * NCQ.  NCQ will drop hob_nsect.
 1090	 */
1091 switch (tf->command) {
1092 case ATA_CMD_READ:
1093 case ATA_CMD_READ_EXT:
1094 case ATA_CMD_WRITE:
1095 case ATA_CMD_WRITE_EXT:
1096 case ATA_CMD_WRITE_FUA_EXT:
1097 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1098 break;
1099#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1100 case ATA_CMD_FPDMA_READ:
1101 case ATA_CMD_FPDMA_WRITE:
1102 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1103 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1104 break;
1105#endif /* FIXME: remove this line when NCQ added */
1106 default:
1107 /* The only other commands EDMA supports in non-queued and
1108 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1109 * of which are defined/used by Linux. If we get here, this
1110 * driver needs work.
1111 *
1112 * FIXME: modify libata to give qc_prep a return value and
1113 * return error here.
1114 */
1115 BUG_ON(tf->command);
1116 break;
1117 }
1118 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1119 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1120 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1121 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1122 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1123 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1124 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1125 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1126 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1127
1128 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1129 return;
1130 mv_fill_sg(qc);
1131}
1132
1133/**
1134 * mv_qc_prep_iie - Host specific command preparation.
1135 * @qc: queued command to prepare
1136 *
1137 * This routine simply redirects to the general purpose routine
 1138 * if command is not DMA.  Else, it handles prep of the Gen IIE
 1139 * CRQB (command request block), does some sanity checking, and calls
1140 * the SG load routine.
1141 *
1142 * LOCKING:
1143 * Inherited from caller.
1144 */
1145static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 struct mv_port_priv *pp = ap->private_data;
1149 struct mv_crqb_iie *crqb;
1150 struct ata_taskfile *tf;
1151 unsigned in_index;
1152 u32 flags = 0;
1153
1154 if (ATA_PROT_DMA != qc->tf.protocol)
1155 return;
1156
1157 /* Fill in Gen IIE command request block
1158 */
1159 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1160 flags |= CRQB_FLAG_READ;
1161
1162 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1163 flags |= qc->tag << CRQB_TAG_SHIFT;
1164
1165 /* get current queue index from hardware */
1166 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1167 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1168
1169 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1170 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1171 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1172 crqb->flags = cpu_to_le32(flags);
1173
1174 tf = &qc->tf;
1175 crqb->ata_cmd[0] = cpu_to_le32(
1176 (tf->command << 16) |
1177 (tf->feature << 24)
1178 );
1179 crqb->ata_cmd[1] = cpu_to_le32(
1180 (tf->lbal << 0) |
1181 (tf->lbam << 8) |
1182 (tf->lbah << 16) |
1183 (tf->device << 24)
1184 );
1185 crqb->ata_cmd[2] = cpu_to_le32(
1186 (tf->hob_lbal << 0) |
1187 (tf->hob_lbam << 8) |
1188 (tf->hob_lbah << 16) |
1189 (tf->hob_feature << 24)
1190 );
1191 crqb->ata_cmd[3] = cpu_to_le32(
1192 (tf->nsect << 0) |
1193 (tf->hob_nsect << 8)
1194 );
1195
1196 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1197 return;
1198 mv_fill_sg(qc);
1199}
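/* For reference, the Gen IIE request block built above packs the whole
 * taskfile into four 32-bit words instead of per-register command words:
 *
 *   ata_cmd[0] = command << 16 | feature << 24
 *   ata_cmd[1] = lbal | lbam << 8 | lbah << 16 | device << 24
 *   ata_cmd[2] = hob_lbal | hob_lbam << 8 | hob_lbah << 16 | hob_feature << 24
 *   ata_cmd[3] = nsect | hob_nsect << 8
 */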
1200
1201/**
1202 * mv_qc_issue - Initiate a command to the host
1203 * @qc: queued command to start
1204 *
1205 * This routine simply redirects to the general purpose routine
1206 * if command is not DMA. Else, it sanity checks our local
1207 * caches of the request producer/consumer indices then enables
1208 * DMA and bumps the request producer index.
1209 *
1210 * LOCKING:
1211 * Inherited from caller.
1212 */
1213static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1214{
1215 void __iomem *port_mmio = mv_ap_base(qc->ap);
1216 struct mv_port_priv *pp = qc->ap->private_data;
1217 unsigned in_index;
1218 u32 in_ptr;
1219
1220 if (ATA_PROT_DMA != qc->tf.protocol) {
1221 /* We're about to send a non-EDMA capable command to the
1222 * port. Turn off EDMA so there won't be problems accessing
1223 * shadow block, etc registers.
1224 */
1225 mv_stop_dma(qc->ap);
1226 return ata_qc_issue_prot(qc);
1227 }
1228
1229 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1230 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1231
1232 /* until we do queuing, the queue should be empty at this point */
1233 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1234 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1235
1236 in_index = mv_inc_q_index(in_index); /* now incr producer index */
1237
1238 mv_start_dma(port_mmio, pp);
1239
1240 /* and write the request in pointer to kick the EDMA to life */
1241 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1242 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1243 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1244
1245 return 0;
1246}
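/* The producer update above keeps the queue base address bits of the
 * register and rewrites only the index field.  Assuming
 * EDMA_REQ_Q_PTR_SHIFT is 5 (each CRQB is 32 bytes), bumping the index
 * from 3 to 4 moves the pointer field from 3 << 5 = 0x60 to
 * 4 << 5 = 0x80, i.e. the byte offset of the next CRQB in the ring.
 */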
1247
1248/**
1249 * mv_get_crpb_status - get status from most recently completed cmd
1250 * @ap: ATA channel to manipulate
1251 *
1252 * This routine is for use when the port is in DMA mode, when it
1253 * will be using the CRPB (command response block) method of
1254 * returning command completion information. We check indices
1255 * are good, grab status, and bump the response consumer index to
1256 * prove that we're up to date.
1257 *
1258 * LOCKING:
1259 * Inherited from caller.
1260 */
1261static u8 mv_get_crpb_status(struct ata_port *ap)
1262{
1263 void __iomem *port_mmio = mv_ap_base(ap);
1264 struct mv_port_priv *pp = ap->private_data;
1265 unsigned out_index;
1266 u32 out_ptr;
1267 u8 ata_status;
1268
1269 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1270 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1271
1272 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1273 >> CRPB_FLAG_STATUS_SHIFT;
1274
1275 /* increment our consumer index... */
1276 out_index = mv_inc_q_index(out_index);
1277
1278 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1279 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1280 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1281
1282 /* write out our inc'd consumer index so EDMA knows we're caught up */
1283 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1284 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1285 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1286
1287 /* Return ATA status register for completed CRPB */
1288 return ata_status;
1289}
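/* The completed command's ATA status byte is stored by the hardware in
 * the upper bits of the 16-bit CRPB flags word, which is why the shift
 * by CRPB_FLAG_STATUS_SHIFT above recovers the device status register
 * value as of command completion.
 */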
1290
1291/**
1292 * mv_err_intr - Handle error interrupts on the port
1293 * @ap: ATA channel to manipulate
 1294 * @reset_allowed: bool: if zero, do not trigger a reset from here
1295 *
1296 * In most cases, just clear the interrupt and move on. However,
1297 * some cases require an eDMA reset, which is done right before
1298 * the COMRESET in mv_phy_reset(). The SERR case requires a
1299 * clear of pending errors in the SATA SERROR register. Finally,
1300 * if the port disabled DMA, update our cached copy to match.
1301 *
1302 * LOCKING:
1303 * Inherited from caller.
1304 */
1305static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1306{
1307 void __iomem *port_mmio = mv_ap_base(ap);
1308 u32 edma_err_cause, serr = 0;
1309
1310 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1311
1312 if (EDMA_ERR_SERR & edma_err_cause) {
1313 sata_scr_read(ap, SCR_ERROR, &serr);
1314 sata_scr_write_flush(ap, SCR_ERROR, serr);
1315 }
1316 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1317 struct mv_port_priv *pp = ap->private_data;
1318 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1319 }
1320 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1321 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
1322
 1323	/* Clear EDMA now that the SERR cleanup is done */
1324 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1325
1326 /* check for fatal here and recover if needed */
1327 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1328 mv_stop_and_reset(ap);
1329}
1330
1331/**
1332 * mv_host_intr - Handle all interrupts on the given host controller
1333 * @host_set: host specific structure
1334 * @relevant: port error bits relevant to this host controller
1335 * @hc: which host controller we're to look at
1336 *
 1337 * Read, then write-clear, the HC interrupt status; then walk each
1338 * port connected to the HC and see if it needs servicing. Port
1339 * success ints are reported in the HC interrupt status reg, the
1340 * port error ints are reported in the higher level main
1341 * interrupt status register and thus are passed in via the
1342 * 'relevant' argument.
1343 *
1344 * LOCKING:
1345 * Inherited from caller.
1346 */
1347static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1348 unsigned int hc)
1349{
1350 void __iomem *mmio = host_set->mmio_base;
1351 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1352 struct ata_queued_cmd *qc;
1353 u32 hc_irq_cause;
1354 int shift, port, port0, hard_port, handled;
1355 unsigned int err_mask;
1356
1357 if (hc == 0) {
1358 port0 = 0;
1359 } else {
1360 port0 = MV_PORTS_PER_HC;
1361 }
1362
1363 /* we'll need the HC success int register in most cases */
1364 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1365 if (hc_irq_cause) {
1366 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1367 }
1368
1369 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
 1370		hc, relevant, hc_irq_cause);
1371
1372 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1373 u8 ata_status = 0;
1374 struct ata_port *ap = host_set->ports[port];
1375 struct mv_port_priv *pp = ap->private_data;
1376
1377 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1378 handled = 0; /* ensure ata_status is set if handled++ */
1379
1380 /* Note that DEV_IRQ might happen spuriously during EDMA,
1381 * and should be ignored in such cases.
1382 * The cause of this is still under investigation.
1383 */
1384 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1385 /* EDMA: check for response queue interrupt */
1386 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1387 ata_status = mv_get_crpb_status(ap);
1388 handled = 1;
1389 }
1390 } else {
1391 /* PIO: check for device (drive) interrupt */
1392 if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1393 ata_status = readb((void __iomem *)
1394 ap->ioaddr.status_addr);
1395 handled = 1;
1396 /* ignore spurious intr if drive still BUSY */
1397 if (ata_status & ATA_BUSY) {
1398 ata_status = 0;
1399 handled = 0;
1400 }
1401 }
1402 }
1403
1404 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1405 continue;
1406
1407 err_mask = ac_err_mask(ata_status);
1408
1409 shift = port << 1; /* (port * 2) */
1410 if (port >= MV_PORTS_PER_HC) {
1411 shift++; /* skip bit 8 in the HC Main IRQ reg */
1412 }
1413 if ((PORT0_ERR << shift) & relevant) {
1414 mv_err_intr(ap, 1);
1415 err_mask |= AC_ERR_OTHER;
1416 handled = 1;
1417 }
1418
1419 if (handled) {
1420 qc = ata_qc_from_tag(ap, ap->active_tag);
1421 if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1422 VPRINTK("port %u IRQ found for qc, "
1423 "ata_status 0x%x\n", port,ata_status);
1424 /* mark qc status appropriately */
1425 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1426 qc->err_mask |= err_mask;
1427 ata_qc_complete(qc);
1428 }
1429 }
1430 }
1431 }
1432 VPRINTK("EXIT\n");
1433}
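/* Main-cause bit layout implied by the shift arithmetic above: each port
 * owns an error bit at (port * 2), with ports 4-7 shifted up one extra
 * bit because bit 8 (ports 0-3 coalescing done, per this driver's
 * register definitions) sits in the middle of the field:
 *
 *   port 0 err = bit 0 ... port 3 err = bit 6, then skip bit 8,
 *   port 4 err = bit 9 ... port 7 err = bit 15
 */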
1434
1435/**
 1436 * mv_interrupt - Main interrupt event handler
1437 * @irq: unused
1438 * @dev_instance: private data; in this case the host structure
1439 * @regs: unused
1440 *
 1441 * Read the read-only interrupt cause register to determine if any
 1442 * host controllers have pending interrupts.  If so, call the lower
 1443 * level routine to handle them.  Also check for PCI errors, which are
 1444 * reported only here.
1445 *
1446 * LOCKING:
1447 * This routine holds the host_set lock while processing pending
1448 * interrupts.
1449 */
1450static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1451 struct pt_regs *regs)
1452{
1453 struct ata_host_set *host_set = dev_instance;
1454 unsigned int hc, handled = 0, n_hcs;
1455 void __iomem *mmio = host_set->mmio_base;
1456 struct mv_host_priv *hpriv;
1457 u32 irq_stat;
1458
1459 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1460
1461 /* check the cases where we either have nothing pending or have read
 1462	 * a bogus register value, which can indicate HW removal or a PCI fault
1463 */
1464 if (!irq_stat || (0xffffffffU == irq_stat)) {
1465 return IRQ_NONE;
1466 }
1467
1468 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
1469 spin_lock(&host_set->lock);
1470
1471 for (hc = 0; hc < n_hcs; hc++) {
1472 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1473 if (relevant) {
1474 mv_host_intr(host_set, relevant, hc);
1475 handled++;
1476 }
1477 }
1478
1479 hpriv = host_set->private_data;
1480 if (IS_60XX(hpriv)) {
1481 /* deal with the interrupt coalescing bits */
1482 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1483 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1484 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1485 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1486 }
1487 }
1488
1489 if (PCI_ERR & irq_stat) {
1490 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1491 readl(mmio + PCI_IRQ_CAUSE_OFS));
1492
1493 DPRINTK("All regs @ PCI error\n");
1494 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1495
1496 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1497 handled++;
1498 }
1499 spin_unlock(&host_set->lock);
1500
1501 return IRQ_RETVAL(handled);
1502}
1503
1504static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1505{
1506 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1507 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1508
1509 return hc_mmio + ofs;
1510}
1511
1512static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1513{
1514 unsigned int ofs;
1515
1516 switch (sc_reg_in) {
1517 case SCR_STATUS:
1518 case SCR_ERROR:
1519 case SCR_CONTROL:
1520 ofs = sc_reg_in * sizeof(u32);
1521 break;
1522 default:
1523 ofs = 0xffffffffU;
1524 break;
1525 }
1526 return ofs;
1527}
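/* This works because libata numbers the SCRs to match the SATA spec:
 * SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2, so the 50xx phy block
 * exposes them at consecutive 32-bit slots 0x0, 0x4 and 0x8.  Any other
 * SCR maps to the 0xffffffffU sentinel that the callers below check.
 */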
1528
1529static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1530{
1531 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1532 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1533
1534 if (ofs != 0xffffffffU)
1535 return readl(mmio + ofs);
1536 else
1537 return (u32) ofs;
1538}
1539
1540static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1541{
1542 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1543 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1544
1545 if (ofs != 0xffffffffU)
1546 writelfl(val, mmio + ofs);
1547}
1548
1549static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1550{
1551 u8 rev_id;
1552 int early_5080;
1553
1554 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1555
1556 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1557
1558 if (!early_5080) {
1559 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1560 tmp |= (1 << 0);
1561 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1562 }
1563
1564 mv_reset_pci_bus(pdev, mmio);
1565}
1566
1567static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1568{
1569 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1570}
1571
1572static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1573 void __iomem *mmio)
1574{
1575 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1576 u32 tmp;
1577
1578 tmp = readl(phy_mmio + MV5_PHY_MODE);
1579
1580 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1581 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1582}
1583
1584static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1585{
1586 u32 tmp;
1587
1588 writel(0, mmio + MV_GPIO_PORT_CTL);
1589
1590 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1591
1592 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1593 tmp |= ~(1 << 0);
1594 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1595}
1596
1597static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1598 unsigned int port)
1599{
1600 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1601 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1602 u32 tmp;
1603 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1604
1605 if (fix_apm_sq) {
1606 tmp = readl(phy_mmio + MV5_LT_MODE);
1607 tmp |= (1 << 19);
1608 writel(tmp, phy_mmio + MV5_LT_MODE);
1609
1610 tmp = readl(phy_mmio + MV5_PHY_CTL);
1611 tmp &= ~0x3;
1612 tmp |= 0x1;
1613 writel(tmp, phy_mmio + MV5_PHY_CTL);
1614 }
1615
1616 tmp = readl(phy_mmio + MV5_PHY_MODE);
1617 tmp &= ~mask;
1618 tmp |= hpriv->signal[port].pre;
1619 tmp |= hpriv->signal[port].amps;
1620 writel(tmp, phy_mmio + MV5_PHY_MODE);
1621}
1622
1623
1624#undef ZERO
1625#define ZERO(reg) writel(0, port_mmio + (reg))
1626static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1627 unsigned int port)
1628{
1629 void __iomem *port_mmio = mv_port_base(mmio, port);
1630
1631 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1632
1633 mv_channel_reset(hpriv, mmio, port);
1634
1635 ZERO(0x028); /* command */
1636 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1637 ZERO(0x004); /* timer */
1638 ZERO(0x008); /* irq err cause */
1639 ZERO(0x00c); /* irq err mask */
1640 ZERO(0x010); /* rq bah */
1641 ZERO(0x014); /* rq inp */
1642 ZERO(0x018); /* rq outp */
1643 ZERO(0x01c); /* respq bah */
1644 ZERO(0x024); /* respq outp */
1645 ZERO(0x020); /* respq inp */
1646 ZERO(0x02c); /* test control */
1647 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1648}
1649#undef ZERO
1650
1651#define ZERO(reg) writel(0, hc_mmio + (reg))
1652static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1653 unsigned int hc)
1654{
1655 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1656 u32 tmp;
1657
1658 ZERO(0x00c);
1659 ZERO(0x010);
1660 ZERO(0x014);
1661 ZERO(0x018);
1662
1663 tmp = readl(hc_mmio + 0x20);
1664 tmp &= 0x1c1c1c1c;
1665 tmp |= 0x03030303;
1666 writel(tmp, hc_mmio + 0x20);
1667}
1668#undef ZERO
1669
1670static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1671 unsigned int n_hc)
1672{
1673 unsigned int hc, port;
1674
1675 for (hc = 0; hc < n_hc; hc++) {
1676 for (port = 0; port < MV_PORTS_PER_HC; port++)
1677 mv5_reset_hc_port(hpriv, mmio,
1678 (hc * MV_PORTS_PER_HC) + port);
1679
1680 mv5_reset_one_hc(hpriv, mmio, hc);
1681 }
1682
1683 return 0;
1684}
1685
1686#undef ZERO
1687#define ZERO(reg) writel(0, mmio + (reg))
1688static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1689{
1690 u32 tmp;
1691
1692 tmp = readl(mmio + MV_PCI_MODE);
1693 tmp &= 0xff00ffff;
1694 writel(tmp, mmio + MV_PCI_MODE);
1695
1696 ZERO(MV_PCI_DISC_TIMER);
1697 ZERO(MV_PCI_MSI_TRIGGER);
1698 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1699 ZERO(HC_MAIN_IRQ_MASK_OFS);
1700 ZERO(MV_PCI_SERR_MASK);
1701 ZERO(PCI_IRQ_CAUSE_OFS);
1702 ZERO(PCI_IRQ_MASK_OFS);
1703 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1704 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1705 ZERO(MV_PCI_ERR_ATTRIBUTE);
1706 ZERO(MV_PCI_ERR_COMMAND);
1707}
1708#undef ZERO
1709
1710static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1711{
1712 u32 tmp;
1713
1714 mv5_reset_flash(hpriv, mmio);
1715
1716 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1717 tmp &= 0x3;
1718 tmp |= (1 << 5) | (1 << 6);
1719 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1720}
1721
1722/**
1723 * mv6_reset_hc - Perform the 6xxx global soft reset
1724 * @mmio: base address of the HBA
1725 *
1726 * This routine only applies to 6xxx parts.
1727 *
1728 * LOCKING:
1729 * Inherited from caller.
1730 */
1731static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1732 unsigned int n_hc)
1733{
1734 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1735 int i, rc = 0;
1736 u32 t;
1737
 1738	/* Follow the procedure defined in the PCI "main command and status
 1739	 * register" table.
1740 */
1741 t = readl(reg);
1742 writel(t | STOP_PCI_MASTER, reg);
1743
1744 for (i = 0; i < 1000; i++) {
1745 udelay(1);
1746 t = readl(reg);
1747 if (PCI_MASTER_EMPTY & t) {
1748 break;
1749 }
1750 }
1751 if (!(PCI_MASTER_EMPTY & t)) {
1752 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1753 rc = 1;
1754 goto done;
1755 }
1756
1757 /* set reset */
1758 i = 5;
1759 do {
1760 writel(t | GLOB_SFT_RST, reg);
1761 t = readl(reg);
1762 udelay(1);
1763 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1764
1765 if (!(GLOB_SFT_RST & t)) {
1766 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1767 rc = 1;
1768 goto done;
1769 }
1770
1771 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1772 i = 5;
1773 do {
1774 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1775 t = readl(reg);
1776 udelay(1);
1777 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1778
1779 if (GLOB_SFT_RST & t) {
1780 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1781 rc = 1;
1782 }
1783done:
1784 return rc;
1785}
1786
1787static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1788 void __iomem *mmio)
1789{
1790 void __iomem *port_mmio;
1791 u32 tmp;
1792
1793 tmp = readl(mmio + MV_RESET_CFG);
1794 if ((tmp & (1 << 0)) == 0) {
1795 hpriv->signal[idx].amps = 0x7 << 8;
1796 hpriv->signal[idx].pre = 0x1 << 5;
1797 return;
1798 }
1799
1800 port_mmio = mv_port_base(mmio, idx);
1801 tmp = readl(port_mmio + PHY_MODE2);
1802
1803 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1804 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1805}
1806
1807static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1808{
1809 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1810}
1811
1812static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1813 unsigned int port)
1814{
1815 void __iomem *port_mmio = mv_port_base(mmio, port);
1816
1817 u32 hp_flags = hpriv->hp_flags;
1818 int fix_phy_mode2 =
1819 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1820 int fix_phy_mode4 =
1821 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1822 u32 m2, tmp;
1823
1824 if (fix_phy_mode2) {
1825 m2 = readl(port_mmio + PHY_MODE2);
1826 m2 &= ~(1 << 16);
1827 m2 |= (1 << 31);
1828 writel(m2, port_mmio + PHY_MODE2);
1829
1830 udelay(200);
1831
1832 m2 = readl(port_mmio + PHY_MODE2);
1833 m2 &= ~((1 << 16) | (1 << 31));
1834 writel(m2, port_mmio + PHY_MODE2);
1835
1836 udelay(200);
1837 }
1838
1839 /* who knows what this magic does */
1840 tmp = readl(port_mmio + PHY_MODE3);
1841 tmp &= ~0x7F800000;
1842 tmp |= 0x2A800000;
1843 writel(tmp, port_mmio + PHY_MODE3);
1844
1845 if (fix_phy_mode4) {
1846 u32 m4;
1847
1848 m4 = readl(port_mmio + PHY_MODE4);
1849
1850 if (hp_flags & MV_HP_ERRATA_60X1B2)
1851 tmp = readl(port_mmio + 0x310);
1852
1853 m4 = (m4 & ~(1 << 1)) | (1 << 0);
1854
1855 writel(m4, port_mmio + PHY_MODE4);
1856
1857 if (hp_flags & MV_HP_ERRATA_60X1B2)
1858 writel(tmp, port_mmio + 0x310);
1859 }
1860
1861 /* Revert values of pre-emphasis and signal amps to the saved ones */
1862 m2 = readl(port_mmio + PHY_MODE2);
1863
1864 m2 &= ~MV_M2_PREAMP_MASK;
1865 m2 |= hpriv->signal[port].amps;
1866 m2 |= hpriv->signal[port].pre;
1867 m2 &= ~(1 << 16);
1868
1869 /* according to mvSata 3.6.1, some IIE values are fixed */
1870 if (IS_GEN_IIE(hpriv)) {
1871 m2 &= ~0xC30FF01F;
1872 m2 |= 0x0000900F;
1873 }
1874
1875 writel(m2, port_mmio + PHY_MODE2);
1876}
1877
1878static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1879 unsigned int port_no)
1880{
1881 void __iomem *port_mmio = mv_port_base(mmio, port_no);
1882
1883 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1884
1885 if (IS_60XX(hpriv)) {
1886 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1887 ifctl |= (1 << 7); /* enable gen2i speed */
1888 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1889 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1890 }
1891
1892 udelay(25); /* allow reset propagation */
1893
1894 /* Spec never mentions clearing the bit. Marvell's driver does
1895 * clear the bit, however.
1896 */
1897 writelfl(0, port_mmio + EDMA_CMD_OFS);
1898
1899 hpriv->ops->phy_errata(hpriv, mmio, port_no);
1900
1901 if (IS_50XX(hpriv))
1902 mdelay(1);
1903}
1904
1905static void mv_stop_and_reset(struct ata_port *ap)
1906{
1907 struct mv_host_priv *hpriv = ap->host_set->private_data;
1908 void __iomem *mmio = ap->host_set->mmio_base;
1909
1910 mv_stop_dma(ap);
1911
1912 mv_channel_reset(hpriv, mmio, ap->port_no);
1913
1914 __mv_phy_reset(ap, 0);
1915}
1916
1917static inline void __msleep(unsigned int msec, int can_sleep)
1918{
1919 if (can_sleep)
1920 msleep(msec);
1921 else
1922 mdelay(msec);
1923}
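/* can_sleep selects between a scheduling sleep and a busy-wait so that
 * __mv_phy_reset() below can be used from interrupt context, where
 * sleeping is forbidden (mv_stop_and_reset() passes 0), as well as from
 * process context (mv_phy_reset() passes 1).
 */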
1924
1925/**
1926 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
1927 * @ap: ATA channel to manipulate
1928 *
1929 * Part of this is taken from __sata_phy_reset and modified to
1930 * not sleep since this routine gets called from interrupt level.
1931 *
1932 * LOCKING:
 1933 * Inherited from caller.  This is coded to be safe to call at
1934 * interrupt level, i.e. it does not sleep.
1935 */
1936static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1937{
1938 struct mv_port_priv *pp = ap->private_data;
1939 struct mv_host_priv *hpriv = ap->host_set->private_data;
1940 void __iomem *port_mmio = mv_ap_base(ap);
1941 struct ata_taskfile tf;
1942 struct ata_device *dev = &ap->device[0];
1943 unsigned long timeout;
1944 int retry = 5;
1945 u32 sstatus;
1946
1947 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1948
1949 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1950 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1951 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1952
1953 /* Issue COMRESET via SControl */
1954comreset_retry:
1955 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1956 __msleep(1, can_sleep);
1957
1958 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1959 __msleep(20, can_sleep);
1960
1961 timeout = jiffies + msecs_to_jiffies(200);
1962 do {
1963 sata_scr_read(ap, SCR_STATUS, &sstatus);
1964 sstatus &= 0x3;
1965 if ((sstatus == 3) || (sstatus == 0))
1966 break;
1967
1968 __msleep(1, can_sleep);
1969 } while (time_before(jiffies, timeout));
1970
1971 /* work around errata */
1972 if (IS_60XX(hpriv) &&
1973 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1974 (retry-- > 0))
1975 goto comreset_retry;
1976
1977 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1978 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1979 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1980
1981 if (ata_port_online(ap)) {
1982 ata_port_probe(ap);
1983 } else {
1984 sata_scr_read(ap, SCR_STATUS, &sstatus);
1985 ata_port_printk(ap, KERN_INFO,
1986 "no device found (phy stat %08x)\n", sstatus);
1987 ata_port_disable(ap);
1988 return;
1989 }
1990 ap->cbl = ATA_CBL_SATA;
1991
 1992	/* even after SStatus reflects that the device is ready,
 1993	 * it seems to take a while for the link to be fully
 1994	 * established (and thus Status no longer 0x80/0x7F),
 1995	 * so we poll a bit for that here.
1996 */
1997 retry = 20;
1998 while (1) {
1999 u8 drv_stat = ata_check_status(ap);
2000 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2001 break;
2002 __msleep(500, can_sleep);
2003 if (retry-- <= 0)
2004 break;
2005 }
2006
2007 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
2008 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
2009 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
2010 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2011
2012 dev->class = ata_dev_classify(&tf);
2013 if (!ata_dev_enabled(dev)) {
2014 VPRINTK("Port disabled post-sig: No device present.\n");
2015 ata_port_disable(ap);
2016 }
2017
2018 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2019
2020 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2021
2022 VPRINTK("EXIT\n");
2023}
2024
2025static void mv_phy_reset(struct ata_port *ap)
2026{
2027 __mv_phy_reset(ap, 1);
2028}
2029
2030/**
2031 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2032 * @ap: ATA channel to manipulate
2033 *
2034 * Intent is to clear all pending error conditions, reset the
2035 * chip/bus, fail the command, and move on.
2036 *
2037 * LOCKING:
2038 * This routine holds the host_set lock while failing the command.
2039 */
2040static void mv_eng_timeout(struct ata_port *ap)
2041{
2042 struct ata_queued_cmd *qc;
2043 unsigned long flags;
2044
2045 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2046 DPRINTK("All regs @ start of eng_timeout\n");
2047 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2048 to_pci_dev(ap->host_set->dev));
2049
2050 qc = ata_qc_from_tag(ap, ap->active_tag);
2051 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2052 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
2053 &qc->scsicmd->cmnd);
2054
2055 spin_lock_irqsave(&ap->host_set->lock, flags);
2056 mv_err_intr(ap, 0);
2057 mv_stop_and_reset(ap);
2058 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2059
2060 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2061 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2062 qc->err_mask |= AC_ERR_TIMEOUT;
2063 ata_eh_qc_complete(qc);
2064 }
2065}
2066
2067/**
2068 * mv_port_init - Perform some early initialization on a single port.
2069 * @port: libata data structure storing shadow register addresses
2070 * @port_mmio: base address of the port
2071 *
2072 * Initialize shadow register mmio addresses, clear outstanding
2073 * interrupts on the port, and unmask interrupts for the future
2074 * start of the port.
2075 *
2076 * LOCKING:
2077 * Inherited from caller.
2078 */
2079static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2080{
2081 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
2082 unsigned serr_ofs;
2083
2084 /* PIO related setup
2085 */
2086 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2087 port->error_addr =
2088 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2089 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2090 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2091 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2092 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2093 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2094 port->status_addr =
2095 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2096 /* special case: control/altstatus doesn't have ATA_REG_ address */
2097 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2098
2099 /* unused: */
2100 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
2101
2102 /* Clear any currently outstanding port interrupt conditions */
2103 serr_ofs = mv_scr_offset(SCR_ERROR);
2104 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2105 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2106
2107 /* unmask all EDMA error interrupts */
2108 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2109
2110 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2111 readl(port_mmio + EDMA_CFG_OFS),
2112 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2113 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2114}
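/* The shadow block set up above lays the taskfile registers out at a
 * 32-bit stride using libata's standard register numbering
 * (ATA_REG_DATA = 0 through ATA_REG_STATUS = 7):
 *
 *   +0x00 data      +0x08 nsect   +0x10 lbam   +0x18 device
 *   +0x04 err/feat  +0x0c lbal    +0x14 lbah   +0x1c status/cmd
 *
 * with control/altstatus living separately at SHD_CTL_AST_OFS.
 */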
2115
2116static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2117 unsigned int board_idx)
2118{
2119 u8 rev_id;
2120 u32 hp_flags = hpriv->hp_flags;
2121
2122 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2123
2124 switch(board_idx) {
2125 case chip_5080:
2126 hpriv->ops = &mv5xxx_ops;
2127 hp_flags |= MV_HP_50XX;
2128
2129 switch (rev_id) {
2130 case 0x1:
2131 hp_flags |= MV_HP_ERRATA_50XXB0;
2132 break;
2133 case 0x3:
2134 hp_flags |= MV_HP_ERRATA_50XXB2;
2135 break;
2136 default:
2137 dev_printk(KERN_WARNING, &pdev->dev,
2138 "Applying 50XXB2 workarounds to unknown rev\n");
2139 hp_flags |= MV_HP_ERRATA_50XXB2;
2140 break;
2141 }
2142 break;
2143
2144 case chip_504x:
2145 case chip_508x:
2146 hpriv->ops = &mv5xxx_ops;
2147 hp_flags |= MV_HP_50XX;
2148
2149 switch (rev_id) {
2150 case 0x0:
2151 hp_flags |= MV_HP_ERRATA_50XXB0;
2152 break;
2153 case 0x3:
2154 hp_flags |= MV_HP_ERRATA_50XXB2;
2155 break;
2156 default:
2157 dev_printk(KERN_WARNING, &pdev->dev,
2158 "Applying B2 workarounds to unknown rev\n");
2159 hp_flags |= MV_HP_ERRATA_50XXB2;
2160 break;
2161 }
2162 break;
2163
2164 case chip_604x:
2165 case chip_608x:
2166 hpriv->ops = &mv6xxx_ops;
2167
2168 switch (rev_id) {
2169 case 0x7:
2170 hp_flags |= MV_HP_ERRATA_60X1B2;
2171 break;
2172 case 0x9:
2173 hp_flags |= MV_HP_ERRATA_60X1C0;
2174 break;
2175 default:
2176 dev_printk(KERN_WARNING, &pdev->dev,
2177 "Applying B2 workarounds to unknown rev\n");
2178 hp_flags |= MV_HP_ERRATA_60X1B2;
2179 break;
2180 }
2181 break;
2182
2183 case chip_7042:
2184 case chip_6042:
2185 hpriv->ops = &mv6xxx_ops;
2186
2187 hp_flags |= MV_HP_GEN_IIE;
2188
2189 switch (rev_id) {
2190 case 0x0:
2191 hp_flags |= MV_HP_ERRATA_XX42A0;
2192 break;
2193 case 0x1:
2194 hp_flags |= MV_HP_ERRATA_60X1C0;
2195 break;
2196 default:
2197 dev_printk(KERN_WARNING, &pdev->dev,
2198 "Applying 60X1C0 workarounds to unknown rev\n");
2199 hp_flags |= MV_HP_ERRATA_60X1C0;
2200 break;
2201 }
2202 break;
2203
2204 default:
2205 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2206 return 1;
2207 }
2208
2209 hpriv->hp_flags = hp_flags;
2210
2211 return 0;
2212}
2213
2214/**
2215 * mv_init_host - Perform some early initialization of the host.
2216 * @pdev: host PCI device
2217 * @probe_ent: early data struct representing the host
2218 *
2219 * If possible, do an early global reset of the host. Then do
2220 * our port init and clear/unmask all/relevant host interrupts.
2221 *
2222 * LOCKING:
2223 * Inherited from caller.
2224 */
2225static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2226 unsigned int board_idx)
2227{
2228 int rc = 0, n_hc, port, hc;
2229 void __iomem *mmio = probe_ent->mmio_base;
2230 struct mv_host_priv *hpriv = probe_ent->private_data;
2231
2232 /* global interrupt mask */
2233 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2234
2235 rc = mv_chip_id(pdev, hpriv, board_idx);
2236 if (rc)
2237 goto done;
2238
2239 n_hc = mv_get_hc_count(probe_ent->host_flags);
2240 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2241
2242 for (port = 0; port < probe_ent->n_ports; port++)
2243 hpriv->ops->read_preamp(hpriv, port, mmio);
2244
2245 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2246 if (rc)
2247 goto done;
2248
2249 hpriv->ops->reset_flash(hpriv, mmio);
2250 hpriv->ops->reset_bus(pdev, mmio);
2251 hpriv->ops->enable_leds(hpriv, mmio);
2252
2253 for (port = 0; port < probe_ent->n_ports; port++) {
2254 if (IS_60XX(hpriv)) {
2255 void __iomem *port_mmio = mv_port_base(mmio, port);
2256
2257 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2258 ifctl |= (1 << 7); /* enable gen2i speed */
2259 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2260 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2261 }
2262
2263 hpriv->ops->phy_errata(hpriv, mmio, port);
2264 }
2265
2266 for (port = 0; port < probe_ent->n_ports; port++) {
2267 void __iomem *port_mmio = mv_port_base(mmio, port);
2268 mv_port_init(&probe_ent->port[port], port_mmio);
2269 }
2270
2271 for (hc = 0; hc < n_hc; hc++) {
2272 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2273
2274 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2275 "(before clear)=0x%08x\n", hc,
2276 readl(hc_mmio + HC_CFG_OFS),
2277 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2278
2279 /* Clear any currently outstanding hc interrupt conditions */
2280 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2281 }
2282
2283 /* Clear any currently outstanding host interrupt conditions */
2284 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2285
2286 /* and unmask interrupt generation for host regs */
2287 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2288 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2289
2290 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2291 "PCI int cause/mask=0x%08x/0x%08x\n",
2292 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2293 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2294 readl(mmio + PCI_IRQ_CAUSE_OFS),
2295 readl(mmio + PCI_IRQ_MASK_OFS));
2296
2297done:
2298 return rc;
2299}
2300
2301/**
2302 * mv_print_info - Dump key info to kernel log for perusal.
2303 * @probe_ent: early data struct representing the host
2304 *
2305 * FIXME: complete this.
2306 *
2307 * LOCKING:
2308 * Inherited from caller.
2309 */
2310static void mv_print_info(struct ata_probe_ent *probe_ent)
2311{
2312 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2313 struct mv_host_priv *hpriv = probe_ent->private_data;
2314 u8 rev_id, scc;
2315 const char *scc_s;
2316
 2317	/* Use this to determine the HW stepping of the chip so we know
 2318	 * which errata to work around
2319 */
2320 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2321
2322 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2323 if (scc == 0)
2324 scc_s = "SCSI";
2325 else if (scc == 0x01)
2326 scc_s = "RAID";
2327 else
2328 scc_s = "unknown";
2329
2330 dev_printk(KERN_INFO, &pdev->dev,
2331 "%u slots %u ports %s mode IRQ via %s\n",
2332 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2333 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2334}
2335
2336/**
2337 * mv_init_one - handle a positive probe of a Marvell host
2338 * @pdev: PCI device found
2339 * @ent: PCI device ID entry for the matched host
2340 *
2341 * LOCKING:
2342 * Inherited from caller.
2343 */
2344static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2345{
2346 static int printed_version = 0;
2347 struct ata_probe_ent *probe_ent = NULL;
2348 struct mv_host_priv *hpriv;
2349 unsigned int board_idx = (unsigned int)ent->driver_data;
2350 void __iomem *mmio_base;
2351 int pci_dev_busy = 0, rc;
2352
2353 if (!printed_version++)
2354 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2355
2356 rc = pci_enable_device(pdev);
2357 if (rc) {
2358 return rc;
2359 }
2360 pci_set_master(pdev);
2361
2362 rc = pci_request_regions(pdev, DRV_NAME);
2363 if (rc) {
2364 pci_dev_busy = 1;
2365 goto err_out;
2366 }
2367
2368 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
2369 if (probe_ent == NULL) {
2370 rc = -ENOMEM;
2371 goto err_out_regions;
2372 }
2373
2374 memset(probe_ent, 0, sizeof(*probe_ent));
2375 probe_ent->dev = pci_dev_to_dev(pdev);
2376 INIT_LIST_HEAD(&probe_ent->node);
2377
2378 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
2379 if (mmio_base == NULL) {
2380 rc = -ENOMEM;
2381 goto err_out_free_ent;
2382 }
2383
2384 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2385 if (!hpriv) {
2386 rc = -ENOMEM;
2387 goto err_out_iounmap;
2388 }
2389 memset(hpriv, 0, sizeof(*hpriv));
2390
2391 probe_ent->sht = mv_port_info[board_idx].sht;
2392 probe_ent->host_flags = mv_port_info[board_idx].host_flags;
2393 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2394 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2395 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2396
2397 probe_ent->irq = pdev->irq;
2398 probe_ent->irq_flags = SA_SHIRQ;
2399 probe_ent->mmio_base = mmio_base;
2400 probe_ent->private_data = hpriv;
2401
2402 /* initialize adapter */
2403 rc = mv_init_host(pdev, probe_ent, board_idx);
2404 if (rc) {
2405 goto err_out_hpriv;
2406 }
2407
2408 /* Enable interrupts */
2409 if (msi && pci_enable_msi(pdev) == 0) {
2410 hpriv->hp_flags |= MV_HP_FLAG_MSI;
2411 } else {
2412 pci_intx(pdev, 1);
2413 }
2414
2415 mv_dump_pci_cfg(pdev, 0x68);
2416 mv_print_info(probe_ent);
2417
2418 if (ata_device_add(probe_ent) == 0) {
2419 rc = -ENODEV; /* No devices discovered */
2420 goto err_out_dev_add;
2421 }
2422
2423 kfree(probe_ent);
2424 return 0;
2425
2426err_out_dev_add:
2427 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2428 pci_disable_msi(pdev);
2429 } else {
2430 pci_intx(pdev, 0);
2431 }
2432err_out_hpriv:
2433 kfree(hpriv);
2434err_out_iounmap:
2435 pci_iounmap(pdev, mmio_base);
2436err_out_free_ent:
2437 kfree(probe_ent);
2438err_out_regions:
2439 pci_release_regions(pdev);
2440err_out:
2441 if (!pci_dev_busy) {
2442 pci_disable_device(pdev);
2443 }
2444
2445 return rc;
2446}
2447
2448static int __init mv_init(void)
2449{
2450 return pci_module_init(&mv_pci_driver);
2451}
2452
2453static void __exit mv_exit(void)
2454{
2455 pci_unregister_driver(&mv_pci_driver);
2456}
2457
2458MODULE_AUTHOR("Brett Russ");
2459MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2460MODULE_LICENSE("GPL");
2461MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2462MODULE_VERSION(DRV_VERSION);
2463
2464module_param(msi, int, 0444);
2465MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2466
2467module_init(mv_init);
2468module_exit(mv_exit);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
deleted file mode 100644
index d18e7e0932ef..000000000000
--- a/drivers/scsi/sata_nv.c
+++ /dev/null
@@ -1,596 +0,0 @@
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
 27 * This driver programs the NVIDIA SATA controller in a similar
 28 * fashion to other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 */
33
34#include <linux/config.h>
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/device.h>
43#include <scsi/scsi_host.h>
44#include <linux/libata.h>
45
46#define DRV_NAME "sata_nv"
47#define DRV_VERSION "0.9"
48
49enum {
50 NV_PORTS = 2,
51 NV_PIO_MASK = 0x1f,
52 NV_MWDMA_MASK = 0x07,
53 NV_UDMA_MASK = 0x7f,
54 NV_PORT0_SCR_REG_OFFSET = 0x00,
55 NV_PORT1_SCR_REG_OFFSET = 0x40,
56
57 /* INT_STATUS/ENABLE */
58 NV_INT_STATUS = 0x10,
59 NV_INT_ENABLE = 0x11,
60 NV_INT_STATUS_CK804 = 0x440,
61 NV_INT_ENABLE_CK804 = 0x441,
62
63 /* INT_STATUS/ENABLE bits */
64 NV_INT_DEV = 0x01,
65 NV_INT_PM = 0x02,
66 NV_INT_ADDED = 0x04,
67 NV_INT_REMOVED = 0x08,
68
69 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
70
71 NV_INT_ALL = 0x0f,
72 NV_INT_MASK = NV_INT_DEV |
73 NV_INT_ADDED | NV_INT_REMOVED,
74
75 /* INT_CONFIG */
76 NV_INT_CONFIG = 0x12,
77 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
78
79 // For PCI config register 20
80 NV_MCP_SATA_CFG_20 = 0x50,
81 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
82};
83
84static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
85static void nv_ck804_host_stop(struct ata_host_set *host_set);
86static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
87 struct pt_regs *regs);
88static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
89 struct pt_regs *regs);
90static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
91 struct pt_regs *regs);
92static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
93static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
94
95static void nv_nf2_freeze(struct ata_port *ap);
96static void nv_nf2_thaw(struct ata_port *ap);
97static void nv_ck804_freeze(struct ata_port *ap);
98static void nv_ck804_thaw(struct ata_port *ap);
99static void nv_error_handler(struct ata_port *ap);
100
101enum nv_host_type
102{
103 GENERIC,
104 NFORCE2,
105 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
106 CK804
107};
108
109static const struct pci_device_id nv_pci_tbl[] = {
110 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
112 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
114 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
116 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
118 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
120 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
122 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
124 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
126 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
128 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
130 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
132 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
134 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
136 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
138 { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
139 { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
142 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
143 PCI_ANY_ID, PCI_ANY_ID,
144 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
145 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
146 PCI_ANY_ID, PCI_ANY_ID,
147 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
148 { 0, } /* terminate list */
149};
150
151static struct pci_driver nv_pci_driver = {
152 .name = DRV_NAME,
153 .id_table = nv_pci_tbl,
154 .probe = nv_init_one,
155 .remove = ata_pci_remove_one,
156};
157
158static struct scsi_host_template nv_sht = {
159 .module = THIS_MODULE,
160 .name = DRV_NAME,
161 .ioctl = ata_scsi_ioctl,
162 .queuecommand = ata_scsi_queuecmd,
163 .can_queue = ATA_DEF_QUEUE,
164 .this_id = ATA_SHT_THIS_ID,
165 .sg_tablesize = LIBATA_MAX_PRD,
166 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
167 .emulated = ATA_SHT_EMULATED,
168 .use_clustering = ATA_SHT_USE_CLUSTERING,
169 .proc_name = DRV_NAME,
170 .dma_boundary = ATA_DMA_BOUNDARY,
171 .slave_configure = ata_scsi_slave_config,
172 .slave_destroy = ata_scsi_slave_destroy,
173 .bios_param = ata_std_bios_param,
174};
175
176static const struct ata_port_operations nv_generic_ops = {
177 .port_disable = ata_port_disable,
178 .tf_load = ata_tf_load,
179 .tf_read = ata_tf_read,
180 .exec_command = ata_exec_command,
181 .check_status = ata_check_status,
182 .dev_select = ata_std_dev_select,
183 .bmdma_setup = ata_bmdma_setup,
184 .bmdma_start = ata_bmdma_start,
185 .bmdma_stop = ata_bmdma_stop,
186 .bmdma_status = ata_bmdma_status,
187 .qc_prep = ata_qc_prep,
188 .qc_issue = ata_qc_issue_prot,
189 .freeze = ata_bmdma_freeze,
190 .thaw = ata_bmdma_thaw,
191 .error_handler = nv_error_handler,
192 .post_internal_cmd = ata_bmdma_post_internal_cmd,
193 .data_xfer = ata_pio_data_xfer,
194 .irq_handler = nv_generic_interrupt,
195 .irq_clear = ata_bmdma_irq_clear,
196 .scr_read = nv_scr_read,
197 .scr_write = nv_scr_write,
198 .port_start = ata_port_start,
199 .port_stop = ata_port_stop,
200 .host_stop = ata_pci_host_stop,
201};
202
203static const struct ata_port_operations nv_nf2_ops = {
204 .port_disable = ata_port_disable,
205 .tf_load = ata_tf_load,
206 .tf_read = ata_tf_read,
207 .exec_command = ata_exec_command,
208 .check_status = ata_check_status,
209 .dev_select = ata_std_dev_select,
210 .bmdma_setup = ata_bmdma_setup,
211 .bmdma_start = ata_bmdma_start,
212 .bmdma_stop = ata_bmdma_stop,
213 .bmdma_status = ata_bmdma_status,
214 .qc_prep = ata_qc_prep,
215 .qc_issue = ata_qc_issue_prot,
216 .freeze = nv_nf2_freeze,
217 .thaw = nv_nf2_thaw,
218 .error_handler = nv_error_handler,
219 .post_internal_cmd = ata_bmdma_post_internal_cmd,
220 .data_xfer = ata_pio_data_xfer,
221 .irq_handler = nv_nf2_interrupt,
222 .irq_clear = ata_bmdma_irq_clear,
223 .scr_read = nv_scr_read,
224 .scr_write = nv_scr_write,
225 .port_start = ata_port_start,
226 .port_stop = ata_port_stop,
227 .host_stop = ata_pci_host_stop,
228};
229
230static const struct ata_port_operations nv_ck804_ops = {
231 .port_disable = ata_port_disable,
232 .tf_load = ata_tf_load,
233 .tf_read = ata_tf_read,
234 .exec_command = ata_exec_command,
235 .check_status = ata_check_status,
236 .dev_select = ata_std_dev_select,
237 .bmdma_setup = ata_bmdma_setup,
238 .bmdma_start = ata_bmdma_start,
239 .bmdma_stop = ata_bmdma_stop,
240 .bmdma_status = ata_bmdma_status,
241 .qc_prep = ata_qc_prep,
242 .qc_issue = ata_qc_issue_prot,
243 .freeze = nv_ck804_freeze,
244 .thaw = nv_ck804_thaw,
245 .error_handler = nv_error_handler,
246 .post_internal_cmd = ata_bmdma_post_internal_cmd,
247 .data_xfer = ata_pio_data_xfer,
248 .irq_handler = nv_ck804_interrupt,
249 .irq_clear = ata_bmdma_irq_clear,
250 .scr_read = nv_scr_read,
251 .scr_write = nv_scr_write,
252 .port_start = ata_port_start,
253 .port_stop = ata_port_stop,
254 .host_stop = nv_ck804_host_stop,
255};
256
257static struct ata_port_info nv_port_info[] = {
258 /* generic */
259 {
260 .sht = &nv_sht,
261 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
262 .pio_mask = NV_PIO_MASK,
263 .mwdma_mask = NV_MWDMA_MASK,
264 .udma_mask = NV_UDMA_MASK,
265 .port_ops = &nv_generic_ops,
266 },
267 /* nforce2/3 */
268 {
269 .sht = &nv_sht,
270 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
271 .pio_mask = NV_PIO_MASK,
272 .mwdma_mask = NV_MWDMA_MASK,
273 .udma_mask = NV_UDMA_MASK,
274 .port_ops = &nv_nf2_ops,
275 },
276 /* ck804 */
277 {
278 .sht = &nv_sht,
279 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
280 .pio_mask = NV_PIO_MASK,
281 .mwdma_mask = NV_MWDMA_MASK,
282 .udma_mask = NV_UDMA_MASK,
283 .port_ops = &nv_ck804_ops,
284 },
285};
286
287MODULE_AUTHOR("NVIDIA");
288MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
289MODULE_LICENSE("GPL");
290MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
291MODULE_VERSION(DRV_VERSION);
292
293static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
294 struct pt_regs *regs)
295{
296 struct ata_host_set *host_set = dev_instance;
297 unsigned int i;
298 unsigned int handled = 0;
299 unsigned long flags;
300
301 spin_lock_irqsave(&host_set->lock, flags);
302
303 for (i = 0; i < host_set->n_ports; i++) {
304 struct ata_port *ap;
305
306 ap = host_set->ports[i];
307 if (ap &&
308 !(ap->flags & ATA_FLAG_DISABLED)) {
309 struct ata_queued_cmd *qc;
310
311 qc = ata_qc_from_tag(ap, ap->active_tag);
312 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
313 handled += ata_host_intr(ap, qc);
314 else
 315				// No request pending?  Clear the interrupt
 316				// status anyway, in case one is latched.
317 ap->ops->check_status(ap);
318 }
319
320 }
321
322 spin_unlock_irqrestore(&host_set->lock, flags);
323
324 return IRQ_RETVAL(handled);
325}
326
327static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
328{
329 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
330 int handled;
331
332 /* freeze if hotplugged */
333 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
334 ata_port_freeze(ap);
335 return 1;
336 }
337
338 /* bail out if not our interrupt */
339 if (!(irq_stat & NV_INT_DEV))
340 return 0;
341
342 /* DEV interrupt w/ no active qc? */
343 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
344 ata_check_status(ap);
345 return 1;
346 }
347
348 /* handle interrupt */
349 handled = ata_host_intr(ap, qc);
350 if (unlikely(!handled)) {
351 /* spurious, clear it */
352 ata_check_status(ap);
353 }
354
355 return 1;
356}
357
358static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
359{
360 int i, handled = 0;
361
362 for (i = 0; i < host_set->n_ports; i++) {
363 struct ata_port *ap = host_set->ports[i];
364
365 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
366 handled += nv_host_intr(ap, irq_stat);
367
368 irq_stat >>= NV_INT_PORT_SHIFT;
369 }
370
371 return IRQ_RETVAL(handled);
372}
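/* Layout of the irq_stat byte consumed above, per the NV_INT_*
 * definitions at the top of this file (each port occupies
 * NV_INT_PORT_SHIFT = 4 bits):
 *
 *   bits 3:0  port 0: DEV | PM | ADDED | REMOVED
 *   bits 7:4  port 1: DEV | PM | ADDED | REMOVED
 *
 * so the shift in the loop walks the per-port nibbles in port order.
 */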
373
374static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
375 struct pt_regs *regs)
376{
377 struct ata_host_set *host_set = dev_instance;
378 u8 irq_stat;
379 irqreturn_t ret;
380
381 spin_lock(&host_set->lock);
382 irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
383 ret = nv_do_interrupt(host_set, irq_stat);
384 spin_unlock(&host_set->lock);
385
386 return ret;
387}
388
389static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
390 struct pt_regs *regs)
391{
392 struct ata_host_set *host_set = dev_instance;
393 u8 irq_stat;
394 irqreturn_t ret;
395
396 spin_lock(&host_set->lock);
397 irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
398 ret = nv_do_interrupt(host_set, irq_stat);
399 spin_unlock(&host_set->lock);
400
401 return ret;
402}
403
404static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
405{
406 if (sc_reg > SCR_CONTROL)
407 return 0xffffffffU;
408
409 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
410}
411
412static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
413{
414 if (sc_reg > SCR_CONTROL)
415 return;
416
417 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
418}
419
420static void nv_nf2_freeze(struct ata_port *ap)
421{
422 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
423 int shift = ap->port_no * NV_INT_PORT_SHIFT;
424 u8 mask;
425
426 mask = inb(scr_addr + NV_INT_ENABLE);
427 mask &= ~(NV_INT_ALL << shift);
428 outb(mask, scr_addr + NV_INT_ENABLE);
429}
430
431static void nv_nf2_thaw(struct ata_port *ap)
432{
433 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
434 int shift = ap->port_no * NV_INT_PORT_SHIFT;
435 u8 mask;
436
437 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
438
439 mask = inb(scr_addr + NV_INT_ENABLE);
440 mask |= (NV_INT_MASK << shift);
441 outb(mask, scr_addr + NV_INT_ENABLE);
442}
443
444static void nv_ck804_freeze(struct ata_port *ap)
445{
446 void __iomem *mmio_base = ap->host_set->mmio_base;
447 int shift = ap->port_no * NV_INT_PORT_SHIFT;
448 u8 mask;
449
450 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
451 mask &= ~(NV_INT_ALL << shift);
452 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
453}
454
455static void nv_ck804_thaw(struct ata_port *ap)
456{
457 void __iomem *mmio_base = ap->host_set->mmio_base;
458 int shift = ap->port_no * NV_INT_PORT_SHIFT;
459 u8 mask;
460
461 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
462
463 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
464 mask |= (NV_INT_MASK << shift);
465 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
466}
467
468static int nv_hardreset(struct ata_port *ap, unsigned int *class)
469{
470 unsigned int dummy;
471
472 /* SATA hardreset fails to retrieve proper device signature on
473 * some controllers. Don't classify on hardreset. For more
474 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
475 */
476 return sata_std_hardreset(ap, &dummy);
477}
478
479static void nv_error_handler(struct ata_port *ap)
480{
481 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
482 nv_hardreset, ata_std_postreset);
483}
484
485static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
486{
487	static int printed_version;
488 struct ata_port_info *ppi;
489 struct ata_probe_ent *probe_ent;
490 int pci_dev_busy = 0;
491 int rc;
492 u32 bar;
493 unsigned long base;
494
495	/* Make sure this is a SATA controller by counting the number of BARs
496	 * (NVIDIA SATA controllers will always have six BARs).  Otherwise,
497	 * it's an IDE controller and we ignore it. */
498	for (bar = 0; bar < 6; bar++)
499 if (pci_resource_start(pdev, bar) == 0)
500 return -ENODEV;
501
502 if (!printed_version++)
503 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
504
505 rc = pci_enable_device(pdev);
506 if (rc)
507 goto err_out;
508
509 rc = pci_request_regions(pdev, DRV_NAME);
510 if (rc) {
511 pci_dev_busy = 1;
512 goto err_out_disable;
513 }
514
515 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
516 if (rc)
517 goto err_out_regions;
518 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
519 if (rc)
520 goto err_out_regions;
521
522 rc = -ENOMEM;
523
524 ppi = &nv_port_info[ent->driver_data];
525 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
526 if (!probe_ent)
527 goto err_out_regions;
528
529 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
530 if (!probe_ent->mmio_base) {
531 rc = -EIO;
532 goto err_out_free_ent;
533 }
534
535 base = (unsigned long)probe_ent->mmio_base;
536
537 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
538 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
539
540 /* enable SATA space for CK804 */
541 if (ent->driver_data == CK804) {
542 u8 regval;
543
544 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
545 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
546 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
547 }
548
549 pci_set_master(pdev);
550
551 rc = ata_device_add(probe_ent);
552 if (rc != NV_PORTS)
553 goto err_out_iounmap;
554
555 kfree(probe_ent);
556
557 return 0;
558
559err_out_iounmap:
560 pci_iounmap(pdev, probe_ent->mmio_base);
561err_out_free_ent:
562 kfree(probe_ent);
563err_out_regions:
564 pci_release_regions(pdev);
565err_out_disable:
566 if (!pci_dev_busy)
567 pci_disable_device(pdev);
568err_out:
569 return rc;
570}
571
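/*
 * Illustrative sketch: the SATA-vs-IDE probe heuristic at the top of
 * nv_init_one(), isolated.  A function that does not populate all six
 * BARs is treated as an IDE controller and rejected; here
 * pci_resource_start() is modelled by a plain array lookup.
 */
#include <errno.h>

static int looks_like_nv_sata(const unsigned long bar_start[6])
{
	for (int bar = 0; bar < 6; bar++)
		if (bar_start[bar] == 0)
			return -ENODEV;	/* IDE mode: a BAR is missing */
	return 0;
}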
572static void nv_ck804_host_stop(struct ata_host_set *host_set)
573{
574 struct pci_dev *pdev = to_pci_dev(host_set->dev);
575 u8 regval;
576
577 /* disable SATA space for CK804 */
578 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
579 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
580 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
581
582 ata_pci_host_stop(host_set);
583}
584
585static int __init nv_init(void)
586{
587 return pci_module_init(&nv_pci_driver);
588}
589
590static void __exit nv_exit(void)
591{
592 pci_unregister_driver(&nv_pci_driver);
593}
594
595module_init(nv_init);
596module_exit(nv_exit);
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
deleted file mode 100644
index b2b6ed5216e0..000000000000
--- a/drivers/scsi/sata_promise.c
+++ /dev/null
@@ -1,837 +0,0 @@
1/*
2 * sata_promise.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware information only available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04"
50
51
52enum {
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_TBG_MODE = 0x41, /* TBG mode */
56 PDC_FLASH_CTL = 0x44, /* Flash control register */
57 PDC_PCI_CTL = 0x48, /* PCI control and status register */
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
63
64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
65 (1<<8) | (1<<9) | (1<<10),
66
67 board_2037x = 0, /* FastTrak S150 TX2plus */
68 board_20319 = 1, /* FastTrak S150 TX4 */
69 board_20619 = 2, /* FastTrak TX4000 */
70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
73
74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
75
76 PDC_RESET = (1 << 11), /* HDMA reset */
77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
81};
82
83
84struct pdc_port_priv {
85 u8 *pkt;
86 dma_addr_t pkt_dma;
87};
88
89struct pdc_host_priv {
90 int hotplug_offset;
91};
92
93static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
94static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
95static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
96static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
97static void pdc_eng_timeout(struct ata_port *ap);
98static int pdc_port_start(struct ata_port *ap);
99static void pdc_port_stop(struct ata_port *ap);
100static void pdc_pata_phy_reset(struct ata_port *ap);
101static void pdc_sata_phy_reset(struct ata_port *ap);
102static void pdc_qc_prep(struct ata_queued_cmd *qc);
103static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
105static void pdc_irq_clear(struct ata_port *ap);
106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
107static void pdc_host_stop(struct ata_host_set *host_set);
108
109
110static struct scsi_host_template pdc_ata_sht = {
111 .module = THIS_MODULE,
112 .name = DRV_NAME,
113 .ioctl = ata_scsi_ioctl,
114 .queuecommand = ata_scsi_queuecmd,
115 .can_queue = ATA_DEF_QUEUE,
116 .this_id = ATA_SHT_THIS_ID,
117 .sg_tablesize = LIBATA_MAX_PRD,
118 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
119 .emulated = ATA_SHT_EMULATED,
120 .use_clustering = ATA_SHT_USE_CLUSTERING,
121 .proc_name = DRV_NAME,
122 .dma_boundary = ATA_DMA_BOUNDARY,
123 .slave_configure = ata_scsi_slave_config,
124 .slave_destroy = ata_scsi_slave_destroy,
125 .bios_param = ata_std_bios_param,
126};
127
128static const struct ata_port_operations pdc_sata_ops = {
129 .port_disable = ata_port_disable,
130 .tf_load = pdc_tf_load_mmio,
131 .tf_read = ata_tf_read,
132 .check_status = ata_check_status,
133 .exec_command = pdc_exec_command_mmio,
134 .dev_select = ata_std_dev_select,
135
136 .phy_reset = pdc_sata_phy_reset,
137
138 .qc_prep = pdc_qc_prep,
139 .qc_issue = pdc_qc_issue_prot,
140 .eng_timeout = pdc_eng_timeout,
141 .data_xfer = ata_mmio_data_xfer,
142 .irq_handler = pdc_interrupt,
143 .irq_clear = pdc_irq_clear,
144
145 .scr_read = pdc_sata_scr_read,
146 .scr_write = pdc_sata_scr_write,
147 .port_start = pdc_port_start,
148 .port_stop = pdc_port_stop,
149 .host_stop = pdc_host_stop,
150};
151
152static const struct ata_port_operations pdc_pata_ops = {
153 .port_disable = ata_port_disable,
154 .tf_load = pdc_tf_load_mmio,
155 .tf_read = ata_tf_read,
156 .check_status = ata_check_status,
157 .exec_command = pdc_exec_command_mmio,
158 .dev_select = ata_std_dev_select,
159
160 .phy_reset = pdc_pata_phy_reset,
161
162 .qc_prep = pdc_qc_prep,
163 .qc_issue = pdc_qc_issue_prot,
164 .data_xfer = ata_mmio_data_xfer,
165 .eng_timeout = pdc_eng_timeout,
166 .irq_handler = pdc_interrupt,
167 .irq_clear = pdc_irq_clear,
168
169 .port_start = pdc_port_start,
170 .port_stop = pdc_port_stop,
171 .host_stop = pdc_host_stop,
172};
173
174static const struct ata_port_info pdc_port_info[] = {
175 /* board_2037x */
176 {
177 .sht = &pdc_ata_sht,
178 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
179 .pio_mask = 0x1f, /* pio0-4 */
180 .mwdma_mask = 0x07, /* mwdma0-2 */
181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
182 .port_ops = &pdc_sata_ops,
183 },
184
185 /* board_20319 */
186 {
187 .sht = &pdc_ata_sht,
188 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
189 .pio_mask = 0x1f, /* pio0-4 */
190 .mwdma_mask = 0x07, /* mwdma0-2 */
191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
192 .port_ops = &pdc_sata_ops,
193 },
194
195 /* board_20619 */
196 {
197 .sht = &pdc_ata_sht,
198 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
199 .pio_mask = 0x1f, /* pio0-4 */
200 .mwdma_mask = 0x07, /* mwdma0-2 */
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_pata_ops,
203 },
204
205 /* board_20771 */
206 {
207 .sht = &pdc_ata_sht,
208 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
209 .pio_mask = 0x1f, /* pio0-4 */
210 .mwdma_mask = 0x07, /* mwdma0-2 */
211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
212 .port_ops = &pdc_sata_ops,
213 },
214
215 /* board_2057x */
216 {
217 .sht = &pdc_ata_sht,
218 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
219 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
222 .port_ops = &pdc_sata_ops,
223 },
224
225 /* board_40518 */
226 {
227 .sht = &pdc_ata_sht,
228 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
229 .pio_mask = 0x1f, /* pio0-4 */
230 .mwdma_mask = 0x07, /* mwdma0-2 */
231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
232 .port_ops = &pdc_sata_ops,
233 },
234};
235
236static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_2037x },
239 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
240 board_2037x },
241 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
242 board_2037x },
243 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
244 board_2037x },
245 { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
246 board_2037x },
247 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
248 board_2037x },
249 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
250 board_2057x },
251 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
252 board_2057x },
253 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
254 board_2037x },
255
256 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
260 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
261 board_20319 },
262 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
263 board_20319 },
264 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
265 board_20319 },
266 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
267 board_40518 },
268
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 },
271
272 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
273 board_20771 },
274 { } /* terminate list */
275};
276
277
278static struct pci_driver pdc_ata_pci_driver = {
279 .name = DRV_NAME,
280 .id_table = pdc_ata_pci_tbl,
281 .probe = pdc_ata_init_one,
282 .remove = ata_pci_remove_one,
283};
284
285
286static int pdc_port_start(struct ata_port *ap)
287{
288 struct device *dev = ap->host_set->dev;
289 struct pdc_port_priv *pp;
290 int rc;
291
292 rc = ata_port_start(ap);
293 if (rc)
294 return rc;
295
296 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
297 if (!pp) {
298 rc = -ENOMEM;
299 goto err_out;
300 }
301
302 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
303 if (!pp->pkt) {
304 rc = -ENOMEM;
305 goto err_out_kfree;
306 }
307
308 ap->private_data = pp;
309
310 return 0;
311
312err_out_kfree:
313 kfree(pp);
314err_out:
315 ata_port_stop(ap);
316 return rc;
317}
318
319
320static void pdc_port_stop(struct ata_port *ap)
321{
322 struct device *dev = ap->host_set->dev;
323 struct pdc_port_priv *pp = ap->private_data;
324
325 ap->private_data = NULL;
326 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
327 kfree(pp);
328 ata_port_stop(ap);
329}
330
331
332static void pdc_host_stop(struct ata_host_set *host_set)
333{
334 struct pdc_host_priv *hp = host_set->private_data;
335
336 ata_pci_host_stop(host_set);
337
338 kfree(hp);
339}
340
341
342static void pdc_reset_port(struct ata_port *ap)
343{
344 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
345 unsigned int i;
346 u32 tmp;
347
348 for (i = 11; i > 0; i--) {
349 tmp = readl(mmio);
350 if (tmp & PDC_RESET)
351 break;
352
353 udelay(100);
354
355 tmp |= PDC_RESET;
356 writel(tmp, mmio);
357 }
358
359 tmp &= ~PDC_RESET;
360 writel(tmp, mmio);
361 readl(mmio); /* flush */
362}
363
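/*
 * Illustrative sketch: pdc_reset_port() above is a bounded set-and-verify
 * loop; read the control register, stop once the reset bit reads back,
 * otherwise delay and write the bit again, at most eleven times.  In this
 * user-space rendering usleep() stands in for the kernel's udelay().
 */
#include <stdint.h>
#include <unistd.h>

static int assert_bit(volatile uint32_t *reg, uint32_t bit, int tries)
{
	for (; tries > 0; tries--) {
		uint32_t tmp = *reg;

		if (tmp & bit)
			return 0;	/* device latched the bit */
		usleep(100);
		*reg = tmp | bit;	/* try (again) to set it */
	}
	return -1;			/* never observed as set */
}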
364static void pdc_sata_phy_reset(struct ata_port *ap)
365{
366 pdc_reset_port(ap);
367 sata_phy_reset(ap);
368}
369
370static void pdc_pata_cbl_detect(struct ata_port *ap)
371{
372 u8 tmp;
373	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
374
375 tmp = readb(mmio);
376
377 if (tmp & 0x01) {
378 ap->cbl = ATA_CBL_PATA40;
379 ap->udma_mask &= ATA_UDMA_MASK_40C;
380 } else
381 ap->cbl = ATA_CBL_PATA80;
382}
383
384static void pdc_pata_phy_reset(struct ata_port *ap)
385{
386 pdc_pata_cbl_detect(ap);
387 pdc_reset_port(ap);
388 ata_port_probe(ap);
389 ata_bus_reset(ap);
390}
391
392static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
393{
394 if (sc_reg > SCR_CONTROL)
395 return 0xffffffffU;
396 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
397}
398
399
400static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
401 u32 val)
402{
403 if (sc_reg > SCR_CONTROL)
404 return;
405 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
406}
407
408static void pdc_qc_prep(struct ata_queued_cmd *qc)
409{
410 struct pdc_port_priv *pp = qc->ap->private_data;
411 unsigned int i;
412
413 VPRINTK("ENTER\n");
414
415 switch (qc->tf.protocol) {
416 case ATA_PROT_DMA:
417 ata_qc_prep(qc);
418 /* fall through */
419
420 case ATA_PROT_NODATA:
421 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
422 qc->dev->devno, pp->pkt);
423
424 if (qc->tf.flags & ATA_TFLAG_LBA48)
425 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
426 else
427 i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
428
429 pdc_pkt_footer(&qc->tf, pp->pkt, i);
430 break;
431
432 default:
433 break;
434 }
435}
436
437static void pdc_eng_timeout(struct ata_port *ap)
438{
439 struct ata_host_set *host_set = ap->host_set;
440 u8 drv_stat;
441 struct ata_queued_cmd *qc;
442 unsigned long flags;
443
444 DPRINTK("ENTER\n");
445
446 spin_lock_irqsave(&host_set->lock, flags);
447
448 qc = ata_qc_from_tag(ap, ap->active_tag);
449
450 switch (qc->tf.protocol) {
451 case ATA_PROT_DMA:
452 case ATA_PROT_NODATA:
453 ata_port_printk(ap, KERN_ERR, "command timeout\n");
454 drv_stat = ata_wait_idle(ap);
455 qc->err_mask |= __ac_err_mask(drv_stat);
456 break;
457
458 default:
459 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
460
461 ata_port_printk(ap, KERN_ERR,
462 "unknown timeout, cmd 0x%x stat 0x%x\n",
463 qc->tf.command, drv_stat);
464
465 qc->err_mask |= ac_err_mask(drv_stat);
466 break;
467 }
468
469 spin_unlock_irqrestore(&host_set->lock, flags);
470 ata_eh_qc_complete(qc);
471 DPRINTK("EXIT\n");
472}
473
474static inline unsigned int pdc_host_intr( struct ata_port *ap,
475 struct ata_queued_cmd *qc)
476{
477 unsigned int handled = 0;
478 u32 tmp;
479 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
480
481 tmp = readl(mmio);
482 if (tmp & PDC_ERR_MASK) {
483 qc->err_mask |= AC_ERR_DEV;
484 pdc_reset_port(ap);
485 }
486
487 switch (qc->tf.protocol) {
488 case ATA_PROT_DMA:
489 case ATA_PROT_NODATA:
490 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
491 ata_qc_complete(qc);
492 handled = 1;
493 break;
494
495 default:
496 ap->stats.idle_irq++;
497 break;
498 }
499
500 return handled;
501}
502
503static void pdc_irq_clear(struct ata_port *ap)
504{
505 struct ata_host_set *host_set = ap->host_set;
506 void __iomem *mmio = host_set->mmio_base;
507
508 readl(mmio + PDC_INT_SEQMASK);
509}
510
511static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
512{
513 struct ata_host_set *host_set = dev_instance;
514 struct ata_port *ap;
515 u32 mask = 0;
516 unsigned int i, tmp;
517 unsigned int handled = 0;
518 void __iomem *mmio_base;
519
520 VPRINTK("ENTER\n");
521
522 if (!host_set || !host_set->mmio_base) {
523 VPRINTK("QUICK EXIT\n");
524 return IRQ_NONE;
525 }
526
527 mmio_base = host_set->mmio_base;
528
529 /* reading should also clear interrupts */
530 mask = readl(mmio_base + PDC_INT_SEQMASK);
531
532 if (mask == 0xffffffff) {
533 VPRINTK("QUICK EXIT 2\n");
534 return IRQ_NONE;
535 }
536
537 spin_lock(&host_set->lock);
538
539 mask &= 0xffff; /* only 16 tags possible */
540 if (!mask) {
541 VPRINTK("QUICK EXIT 3\n");
542 goto done_irq;
543 }
544
545 writel(mask, mmio_base + PDC_INT_SEQMASK);
546
547 for (i = 0; i < host_set->n_ports; i++) {
548 VPRINTK("port %u\n", i);
549 ap = host_set->ports[i];
550 tmp = mask & (1 << (i + 1));
551 if (tmp && ap &&
552 !(ap->flags & ATA_FLAG_DISABLED)) {
553 struct ata_queued_cmd *qc;
554
555 qc = ata_qc_from_tag(ap, ap->active_tag);
556 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
557 handled += pdc_host_intr(ap, qc);
558 }
559 }
560
561 VPRINTK("EXIT\n");
562
563done_irq:
564 spin_unlock(&host_set->lock);
565 return IRQ_RETVAL(handled);
566}
567
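/*
 * Illustrative sketch: the demux step of pdc_interrupt() above.  Each
 * port's SEQ id is port_no + 1 (see pdc_packet_start() below), so a
 * completion for port i shows up as bit (i + 1) of the SEQ mask, and the
 * handled mask is written back to the register.  The 16-tag limit is why
 * the mask is clamped to 0xffff first.
 */
#include <stdint.h>

static int ports_pending(uint32_t seqmask, int n_ports, int *pending)
{
	int n = 0;

	seqmask &= 0xffff;		/* only 16 tags possible */
	for (int i = 0; i < n_ports; i++)
		if (seqmask & (1u << (i + 1)))
			pending[n++] = i;	/* port i has work */
	return n;
}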
568static inline void pdc_packet_start(struct ata_queued_cmd *qc)
569{
570 struct ata_port *ap = qc->ap;
571 struct pdc_port_priv *pp = ap->private_data;
572 unsigned int port_no = ap->port_no;
573 u8 seq = (u8) (port_no + 1);
574
575 VPRINTK("ENTER, ap %p\n", ap);
576
577 writel(0x00000001, ap->host_set->mmio_base + (seq * 4));
578 readl(ap->host_set->mmio_base + (seq * 4)); /* flush */
579
580 pp->pkt[2] = seq;
581 wmb(); /* flush PRD, pkt writes */
582 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
583 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
584}
585
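/*
 * Illustrative sketch: the ordering in pdc_packet_start() above; publish
 * the packet to memory, fence (wmb() in the driver), write the packet's
 * bus address to the submit register, then read it back so the posted
 * write is flushed before returning.  The user-space fence below is only
 * an analogy for the kernel barrier, and the parameter names are
 * placeholders.
 */
#include <stdint.h>

static void submit_pkt(volatile uint32_t *submit_reg, uint8_t *pkt,
		       uint8_t seq, uint32_t pkt_bus_addr)
{
	pkt[2] = seq;				 /* tag packet with SEQ id */
	__atomic_thread_fence(__ATOMIC_RELEASE); /* ~ wmb(): pkt visible first */
	*submit_reg = pkt_bus_addr;		 /* kick the DMA engine */
	(void)*submit_reg;			 /* flush the posted write */
}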
586static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
587{
588 switch (qc->tf.protocol) {
589 case ATA_PROT_DMA:
590 case ATA_PROT_NODATA:
591 pdc_packet_start(qc);
592 return 0;
593
594 case ATA_PROT_ATAPI_DMA:
595 BUG();
596 break;
597
598 default:
599 break;
600 }
601
602 return ata_qc_issue_prot(qc);
603}
604
605static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
606{
607 WARN_ON (tf->protocol == ATA_PROT_DMA ||
608 tf->protocol == ATA_PROT_NODATA);
609 ata_tf_load(ap, tf);
610}
611
612
613static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
614{
615 WARN_ON (tf->protocol == ATA_PROT_DMA ||
616 tf->protocol == ATA_PROT_NODATA);
617 ata_exec_command(ap, tf);
618}
619
620
621static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
622{
623 port->cmd_addr = base;
624 port->data_addr = base;
625 port->feature_addr =
626 port->error_addr = base + 0x4;
627 port->nsect_addr = base + 0x8;
628 port->lbal_addr = base + 0xc;
629 port->lbam_addr = base + 0x10;
630 port->lbah_addr = base + 0x14;
631 port->device_addr = base + 0x18;
632 port->command_addr =
633 port->status_addr = base + 0x1c;
634 port->altstatus_addr =
635 port->ctl_addr = base + 0x38;
636}
637
638
639static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
640{
641 void __iomem *mmio = pe->mmio_base;
642 struct pdc_host_priv *hp = pe->private_data;
643 int hotplug_offset = hp->hotplug_offset;
644 u32 tmp;
645
646 /*
647 * Except for the hotplug stuff, this is voodoo from the
648 * Promise driver. Label this entire section
649 * "TODO: figure out why we do this"
650 */
651
652 /* change FIFO_SHD to 8 dwords, enable BMR_BURST */
653 tmp = readl(mmio + PDC_FLASH_CTL);
654 tmp |= 0x12000; /* bit 16 (fifo 8 dw) and 13 (bmr burst?) */
655 writel(tmp, mmio + PDC_FLASH_CTL);
656
657 /* clear plug/unplug flags for all ports */
658 tmp = readl(mmio + hotplug_offset);
659 writel(tmp | 0xff, mmio + hotplug_offset);
660
661 /* mask plug/unplug ints */
662 tmp = readl(mmio + hotplug_offset);
663 writel(tmp | 0xff0000, mmio + hotplug_offset);
664
665	/* reduce TBG clock to 133 MHz. */
666 tmp = readl(mmio + PDC_TBG_MODE);
667	tmp &= ~0x30000; /* clear bits 17:16 */
668	tmp |= 0x10000;  /* set bits 17:16 = 01b */
669 writel(tmp, mmio + PDC_TBG_MODE);
670
671 readl(mmio + PDC_TBG_MODE); /* flush */
672 msleep(10);
673
674 /* adjust slew rate control register. */
675 tmp = readl(mmio + PDC_SLEW_CTL);
676	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
677	tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
678 writel(tmp, mmio + PDC_SLEW_CTL);
679}
680
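/*
 * Illustrative sketch: the hotplug handling in pdc_host_init() above
 * implies a register with a write-one-to-clear status byte in bits 7:0
 * and an interrupt-mask byte in bits 23:16; ack everything, then mask
 * everything.  That field layout is inferred from the code, not taken
 * from documentation.
 */
#include <stdint.h>

static void hotplug_quiesce(volatile uint32_t *hotplug_reg)
{
	*hotplug_reg |= 0x000000ff;	/* W1C: clear plug/unplug flags */
	*hotplug_reg |= 0x00ff0000;	/* set mask bits: ints off */
}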
681static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
682{
683 static int printed_version;
684 struct ata_probe_ent *probe_ent = NULL;
685 struct pdc_host_priv *hp;
686 unsigned long base;
687 void __iomem *mmio_base;
688 unsigned int board_idx = (unsigned int) ent->driver_data;
689 int pci_dev_busy = 0;
690 int rc;
691
692 if (!printed_version++)
693 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
694
695 rc = pci_enable_device(pdev);
696 if (rc)
697 return rc;
698
699 rc = pci_request_regions(pdev, DRV_NAME);
700 if (rc) {
701 pci_dev_busy = 1;
702 goto err_out;
703 }
704
705 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
706 if (rc)
707 goto err_out_regions;
708 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
709 if (rc)
710 goto err_out_regions;
711
712 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
713 if (probe_ent == NULL) {
714 rc = -ENOMEM;
715 goto err_out_regions;
716 }
717
718 probe_ent->dev = pci_dev_to_dev(pdev);
719 INIT_LIST_HEAD(&probe_ent->node);
720
721 mmio_base = pci_iomap(pdev, 3, 0);
722 if (mmio_base == NULL) {
723 rc = -ENOMEM;
724 goto err_out_free_ent;
725 }
726 base = (unsigned long) mmio_base;
727
728 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
729 if (hp == NULL) {
730 rc = -ENOMEM;
731 goto err_out_free_ent;
732 }
733
734 /* Set default hotplug offset */
735 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
736 probe_ent->private_data = hp;
737
738 probe_ent->sht = pdc_port_info[board_idx].sht;
739 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
740 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
741 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
742 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
743 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
744
745 probe_ent->irq = pdev->irq;
746 probe_ent->irq_flags = SA_SHIRQ;
747 probe_ent->mmio_base = mmio_base;
748
749 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
750 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
751
752 probe_ent->port[0].scr_addr = base + 0x400;
753 probe_ent->port[1].scr_addr = base + 0x500;
754
755	/* per-board setup; the 4-port boards add two more ports below */
756 switch (board_idx) {
757 case board_40518:
758 /* Override hotplug offset for SATAII150 */
759 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
760 /* Fall through */
761 case board_20319:
762 probe_ent->n_ports = 4;
763
764 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
765 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
766
767 probe_ent->port[2].scr_addr = base + 0x600;
768 probe_ent->port[3].scr_addr = base + 0x700;
769 break;
770 case board_2057x:
771 /* Override hotplug offset for SATAII150 */
772 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
773 /* Fall through */
774 case board_2037x:
775 probe_ent->n_ports = 2;
776 break;
777 case board_20771:
778 probe_ent->n_ports = 2;
779 break;
780 case board_20619:
781 probe_ent->n_ports = 4;
782
783 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
784 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
785
786 probe_ent->port[2].scr_addr = base + 0x600;
787 probe_ent->port[3].scr_addr = base + 0x700;
788 break;
789 default:
790 BUG();
791 break;
792 }
793
794 pci_set_master(pdev);
795
796 /* initialize adapter */
797 pdc_host_init(board_idx, probe_ent);
798
799 /* FIXME: Need any other frees than hp? */
800 if (!ata_device_add(probe_ent))
801 kfree(hp);
802
803 kfree(probe_ent);
804
805 return 0;
806
807err_out_free_ent:
808 kfree(probe_ent);
809err_out_regions:
810 pci_release_regions(pdev);
811err_out:
812 if (!pci_dev_busy)
813 pci_disable_device(pdev);
814 return rc;
815}
816
817
818static int __init pdc_ata_init(void)
819{
820 return pci_module_init(&pdc_ata_pci_driver);
821}
822
823
824static void __exit pdc_ata_exit(void)
825{
826 pci_unregister_driver(&pdc_ata_pci_driver);
827}
828
829
830MODULE_AUTHOR("Jeff Garzik");
831MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
832MODULE_LICENSE("GPL");
833MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
834MODULE_VERSION(DRV_VERSION);
835
836module_init(pdc_ata_init);
837module_exit(pdc_ata_exit);
diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h
deleted file mode 100644
index 6ee5e190262d..000000000000
--- a/drivers/scsi/sata_promise.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * sata_promise.h - Promise SATA common definitions and inline funcs
3 *
4 * Copyright 2003-2004 Red Hat, Inc.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 */
26
27#ifndef __SATA_PROMISE_H__
28#define __SATA_PROMISE_H__
29
30#include <linux/ata.h>
31
32enum pdc_packet_bits {
33 PDC_PKT_READ = (1 << 2),
34 PDC_PKT_NODATA = (1 << 3),
35
36 PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
37 PDC_PKT_CLEAR_BSY = (1 << 4),
38 PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
39 PDC_LAST_REG = (1 << 3),
40
41 PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
42};
43
44static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
45 dma_addr_t sg_table,
46 unsigned int devno, u8 *buf)
47{
48 u8 dev_reg;
49 u32 *buf32 = (u32 *) buf;
50
51 /* set control bits (byte 0), zero delay seq id (byte 3),
52 * and seq id (byte 2)
53 */
54 switch (tf->protocol) {
55 case ATA_PROT_DMA:
56 if (!(tf->flags & ATA_TFLAG_WRITE))
57 buf32[0] = cpu_to_le32(PDC_PKT_READ);
58 else
59 buf32[0] = 0;
60 break;
61
62 case ATA_PROT_NODATA:
63 buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
64 break;
65
66 default:
67 BUG();
68 break;
69 }
70
71 buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
72 buf32[2] = 0; /* no next-packet */
73
74 if (devno == 0)
75 dev_reg = ATA_DEVICE_OBS;
76 else
77 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
78
79 /* select device */
80 buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
81 buf[13] = dev_reg;
82
83 /* device control register */
84 buf[14] = (1 << 5) | PDC_REG_DEVCTL;
85 buf[15] = tf->ctl;
86
87 return 16; /* offset of next byte */
88}
89
90static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
91 unsigned int i)
92{
93 if (tf->flags & ATA_TFLAG_DEVICE) {
94 buf[i++] = (1 << 5) | ATA_REG_DEVICE;
95 buf[i++] = tf->device;
96 }
97
98 /* and finally the command itself; also includes end-of-pkt marker */
99 buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
100 buf[i++] = tf->command;
101
102 return i;
103}
104
105static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
106{
107 /* the "(1 << 5)" should be read "(count << 5)" */
108
109 /* ATA command block registers */
110 buf[i++] = (1 << 5) | ATA_REG_FEATURE;
111 buf[i++] = tf->feature;
112
113 buf[i++] = (1 << 5) | ATA_REG_NSECT;
114 buf[i++] = tf->nsect;
115
116 buf[i++] = (1 << 5) | ATA_REG_LBAL;
117 buf[i++] = tf->lbal;
118
119 buf[i++] = (1 << 5) | ATA_REG_LBAM;
120 buf[i++] = tf->lbam;
121
122 buf[i++] = (1 << 5) | ATA_REG_LBAH;
123 buf[i++] = tf->lbah;
124
125 return i;
126}
127
128static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
129{
130 /* the "(2 << 5)" should be read "(count << 5)" */
131
132 /* ATA command block registers */
133 buf[i++] = (2 << 5) | ATA_REG_FEATURE;
134 buf[i++] = tf->hob_feature;
135 buf[i++] = tf->feature;
136
137 buf[i++] = (2 << 5) | ATA_REG_NSECT;
138 buf[i++] = tf->hob_nsect;
139 buf[i++] = tf->nsect;
140
141 buf[i++] = (2 << 5) | ATA_REG_LBAL;
142 buf[i++] = tf->hob_lbal;
143 buf[i++] = tf->lbal;
144
145 buf[i++] = (2 << 5) | ATA_REG_LBAM;
146 buf[i++] = tf->hob_lbam;
147 buf[i++] = tf->lbam;
148
149 buf[i++] = (2 << 5) | ATA_REG_LBAH;
150 buf[i++] = tf->hob_lbah;
151 buf[i++] = tf->lbah;
152
153 return i;
154}
155
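/*
 * Illustrative sketch: every entry the pdc_prep_* helpers above emit is
 * one descriptor byte, (count << 5) | register_index, followed by
 * <count> data bytes; LBA48 entries use count 2 with the HOB byte first.
 * A generic emitter makes the encoding explicit.  It is an editorial
 * illustration, not part of the original header.
 */
#include <stdint.h>
#include <string.h>

static unsigned int pdc_pkt_emit(uint8_t *buf, unsigned int i,
				 uint8_t reg, const uint8_t *data,
				 unsigned int count)
{
	buf[i++] = (uint8_t)((count << 5) | reg);	/* descriptor byte */
	memcpy(&buf[i], data, count);			/* register value(s) */
	return i + count;				/* next free offset */
}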
156
157#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
deleted file mode 100644
index 98ddc25655f0..000000000000
--- a/drivers/scsi/sata_qstor.c
+++ /dev/null
@@ -1,730 +0,0 @@
1/*
2 * sata_qstor.c - Pacific Digital Corporation QStor SATA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Pacific Digital Corporation.
7 * (OSL/GPL code release authorized by Jalil Fadavi).
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 *
25 * libata documentation is available via 'make {ps|pdf}docs',
26 * as Documentation/DocBook/libata.*
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/device.h>
39#include <scsi/scsi_host.h>
40#include <asm/io.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.06"
45
46enum {
47 QS_PORTS = 4,
48 QS_MAX_PRD = LIBATA_MAX_PRD,
49 QS_CPB_ORDER = 6,
50 QS_CPB_BYTES = (1 << QS_CPB_ORDER),
51 QS_PRD_BYTES = QS_MAX_PRD * 16,
52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
53
54 /* global register offsets */
55 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
56 QS_HID_HPHY = 0x0004, /* host physical interface info */
57 QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
58 QS_HST_SFF = 0x0100, /* host status fifo offset */
59 QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
60
61 /* global control bits */
62 QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
63 QS_CNFG3_GSRST = 0x01, /* global chip reset */
64	QS_SERD3_PHY_ENA	= 0xf0, /* PHY detection ENAble */
65
66 /* per-channel register offsets */
67 QS_CCF_CPBA = 0x0710, /* chan CPB base address */
68 QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
69 QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
70 QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
71 QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
72 QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
73 QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
74 QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
75 QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
76
77 /* channel control bits */
78 QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
79 QS_CTR0_CLER = (1 << 2), /* clear channel errors */
80 QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
81 QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
82 QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
83
84 /* pkt sub-field headers */
85 QS_HCB_HDR = 0x01, /* Host Control Block header */
86 QS_DCB_HDR = 0x02, /* Device Control Block header */
87
88 /* pkt HCB flag bits */
89 QS_HF_DIRO = (1 << 0), /* data DIRection Out */
90 QS_HF_DAT = (1 << 3), /* DATa pkt */
91 QS_HF_IEN = (1 << 4), /* Interrupt ENable */
92 QS_HF_VLD = (1 << 5), /* VaLiD pkt */
93
94 /* pkt DCB flag bits */
95 QS_DF_PORD = (1 << 2), /* Pio OR Dma */
96 QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
97
98 /* PCI device IDs */
99 board_2068_idx = 0, /* QStor 4-port SATA/RAID */
100};
101
102enum {
103 QS_DMA_BOUNDARY = ~0UL
104};
105
106typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
107
108struct qs_port_priv {
109 u8 *pkt;
110 dma_addr_t pkt_dma;
111 qs_state_t state;
112};
113
114static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
118static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap);
127static void qs_irq_clear(struct ata_port *ap);
128static void qs_eng_timeout(struct ata_port *ap);
129
130static struct scsi_host_template qs_ata_sht = {
131 .module = THIS_MODULE,
132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd,
135 .can_queue = ATA_DEF_QUEUE,
136 .this_id = ATA_SHT_THIS_ID,
137 .sg_tablesize = QS_MAX_PRD,
138 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
139 .emulated = ATA_SHT_EMULATED,
140 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
141 .use_clustering = ENABLE_CLUSTERING,
142 .proc_name = DRV_NAME,
143 .dma_boundary = QS_DMA_BOUNDARY,
144 .slave_configure = ata_scsi_slave_config,
145 .slave_destroy = ata_scsi_slave_destroy,
146 .bios_param = ata_std_bios_param,
147};
148
149static const struct ata_port_operations qs_ata_ops = {
150 .port_disable = ata_port_disable,
151 .tf_load = ata_tf_load,
152 .tf_read = ata_tf_read,
153 .check_status = ata_check_status,
154 .check_atapi_dma = qs_check_atapi_dma,
155 .exec_command = ata_exec_command,
156 .dev_select = ata_std_dev_select,
157 .phy_reset = qs_phy_reset,
158 .qc_prep = qs_qc_prep,
159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = qs_eng_timeout,
162 .irq_handler = qs_intr,
163 .irq_clear = qs_irq_clear,
164 .scr_read = qs_scr_read,
165 .scr_write = qs_scr_write,
166 .port_start = qs_port_start,
167 .port_stop = qs_port_stop,
168 .host_stop = qs_host_stop,
169 .bmdma_stop = qs_bmdma_stop,
170 .bmdma_status = qs_bmdma_status,
171};
172
173static const struct ata_port_info qs_port_info[] = {
174 /* board_2068_idx */
175 {
176 .sht = &qs_ata_sht,
177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
178 ATA_FLAG_SATA_RESET |
179 //FIXME ATA_FLAG_SRST |
180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
181 .pio_mask = 0x10, /* pio4 */
182 .udma_mask = 0x7f, /* udma0-6 */
183 .port_ops = &qs_ata_ops,
184 },
185};
186
187static const struct pci_device_id qs_ata_pci_tbl[] = {
188 { PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
189 board_2068_idx },
190
191 { } /* terminate list */
192};
193
194static struct pci_driver qs_ata_pci_driver = {
195 .name = DRV_NAME,
196 .id_table = qs_ata_pci_tbl,
197 .probe = qs_ata_init_one,
198 .remove = ata_pci_remove_one,
199};
200
201static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
202{
203 return 1; /* ATAPI DMA not supported */
204}
205
206static void qs_bmdma_stop(struct ata_queued_cmd *qc)
207{
208 /* nothing */
209}
210
211static u8 qs_bmdma_status(struct ata_port *ap)
212{
213 return 0;
214}
215
216static void qs_irq_clear(struct ata_port *ap)
217{
218 /* nothing */
219}
220
221static inline void qs_enter_reg_mode(struct ata_port *ap)
222{
223 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
224
225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
226 readb(chan + QS_CCT_CTR0); /* flush */
227}
228
229static inline void qs_reset_channel_logic(struct ata_port *ap)
230{
231 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
232
233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
234 readb(chan + QS_CCT_CTR0); /* flush */
235 qs_enter_reg_mode(ap);
236}
237
238static void qs_phy_reset(struct ata_port *ap)
239{
240 struct qs_port_priv *pp = ap->private_data;
241
242 pp->state = qs_state_idle;
243 qs_reset_channel_logic(ap);
244 sata_phy_reset(ap);
245}
246
247static void qs_eng_timeout(struct ata_port *ap)
248{
249 struct qs_port_priv *pp = ap->private_data;
250
251 if (pp->state != qs_state_idle) /* healthy paranoia */
252 pp->state = qs_state_mmio;
253 qs_reset_channel_logic(ap);
254 ata_eng_timeout(ap);
255}
256
257static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
258{
259 if (sc_reg > SCR_CONTROL)
260 return ~0U;
261 return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
262}
263
264static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
265{
266 if (sc_reg > SCR_CONTROL)
267 return;
268 writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
269}
270
271static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
272{
273 struct scatterlist *sg;
274 struct ata_port *ap = qc->ap;
275 struct qs_port_priv *pp = ap->private_data;
276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278
279 WARN_ON(qc->__sg == NULL);
280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281
282 nelem = 0;
283 ata_for_each_sg(sg, qc) {
284 u64 addr;
285 u32 len;
286
287 addr = sg_dma_address(sg);
288 *(__le64 *)prd = cpu_to_le64(addr);
289 prd += sizeof(u64);
290
291 len = sg_dma_len(sg);
292 *(__le32 *)prd = cpu_to_le32(len);
293 prd += sizeof(u64);
294
295 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
296 (unsigned long long)addr, len);
297 nelem++;
298 }
299
300 return nelem;
301}
302
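/*
 * Illustrative sketch: the PRD entry format qs_fill_sg() above emits.
 * Note that prd is advanced by sizeof(u64) after both the address and
 * the length, so each entry is 16 bytes (matching QS_PRD_BYTES above):
 * a little-endian 64-bit bus address, a little-endian 32-bit length,
 * and 4 bytes of padding.
 */
#include <stdint.h>
#include <string.h>

static uint8_t *qs_prd_emit(uint8_t *prd, uint64_t addr_le, uint32_t len_le)
{
	memcpy(prd, &addr_le, sizeof(addr_le));	/* caller pre-converts to LE */
	prd += sizeof(uint64_t);
	memcpy(prd, &len_le, sizeof(len_le));
	prd += sizeof(uint64_t);		/* 8, not 4: pad to 16 bytes */
	return prd;
}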
303static void qs_qc_prep(struct ata_queued_cmd *qc)
304{
305 struct qs_port_priv *pp = qc->ap->private_data;
306 u8 dflags = QS_DF_PORD, *buf = pp->pkt;
307 u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
308 u64 addr;
309 unsigned int nelem;
310
311 VPRINTK("ENTER\n");
312
313 qs_enter_reg_mode(qc->ap);
314 if (qc->tf.protocol != ATA_PROT_DMA) {
315 ata_qc_prep(qc);
316 return;
317 }
318
319 nelem = qs_fill_sg(qc);
320
321 if ((qc->tf.flags & ATA_TFLAG_WRITE))
322 hflags |= QS_HF_DIRO;
323 if ((qc->tf.flags & ATA_TFLAG_LBA48))
324 dflags |= QS_DF_ELBA;
325
326 /* host control block (HCB) */
327 buf[ 0] = QS_HCB_HDR;
328 buf[ 1] = hflags;
329 *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
330 *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
331 addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
332 *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
333
334 /* device control block (DCB) */
335 buf[24] = QS_DCB_HDR;
336 buf[28] = dflags;
337
338 /* frame information structure (FIS) */
339 ata_tf_to_fis(&qc->tf, &buf[32], 0);
340}
341
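/*
 * Illustrative sketch: the command-packet layout qs_qc_prep() above
 * fills in, written out as offset constants.  The offsets are read
 * straight from the code; the gaps between fields are left zeroed by
 * the driver.  This enum is an editorial summary, not a driver
 * definition.
 */
enum qs_pkt_offsets {
	QS_OFF_HCB_HDR	  = 0,	/* host control block header byte */
	QS_OFF_HCB_FLAGS  = 1,	/* DIRO/DAT/IEN/VLD flags */
	QS_OFF_HCB_NBYTES = 4,	/* le32: total transfer length */
	QS_OFF_HCB_NELEM  = 8,	/* le32: PRD entry count */
	QS_OFF_HCB_PRD	  = 16,	/* le64: PRD table bus address */
	QS_OFF_DCB_HDR	  = 24,	/* device control block header byte */
	QS_OFF_DCB_FLAGS  = 28,	/* PORD/ELBA flags */
	QS_OFF_FIS	  = 32,	/* register FIS from ata_tf_to_fis() */
};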
342static inline void qs_packet_start(struct ata_queued_cmd *qc)
343{
344 struct ata_port *ap = qc->ap;
345 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
346
347 VPRINTK("ENTER, ap %p\n", ap);
348
349 writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
350 wmb(); /* flush PRDs and pkt to memory */
351 writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
352 readl(chan + QS_CCT_CFF); /* flush */
353}
354
355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{
357 struct qs_port_priv *pp = qc->ap->private_data;
358
359 switch (qc->tf.protocol) {
360 case ATA_PROT_DMA:
361
362 pp->state = qs_state_pkt;
363 qs_packet_start(qc);
364 return 0;
365
366 case ATA_PROT_ATAPI_DMA:
367 BUG();
368 break;
369
370 default:
371 break;
372 }
373
374 pp->state = qs_state_mmio;
375 return ata_qc_issue_prot(qc);
376}
377
378static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
379{
380 unsigned int handled = 0;
381 u8 sFFE;
382 u8 __iomem *mmio_base = host_set->mmio_base;
383
384 do {
385 u32 sff0 = readl(mmio_base + QS_HST_SFF);
386 u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
387 u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
388 sFFE = sff1 >> 31; /* empty flag */
389
390 if (sEVLD) {
391 u8 sDST = sff0 >> 16; /* dev status */
392 u8 sHST = sff1 & 0x3f; /* host status */
393 unsigned int port_no = (sff1 >> 8) & 0x03;
394 struct ata_port *ap = host_set->ports[port_no];
395
396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
397 sff1, sff0, port_no, sHST, sDST);
398 handled = 1;
399 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
400 struct ata_queued_cmd *qc;
401 struct qs_port_priv *pp = ap->private_data;
402 if (!pp || pp->state != qs_state_pkt)
403 continue;
404 qc = ata_qc_from_tag(ap, ap->active_tag);
405 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
406 switch (sHST) {
407 case 0: /* successful CPB */
408 case 3: /* device error */
409 pp->state = qs_state_idle;
410 qs_enter_reg_mode(qc->ap);
411 qc->err_mask |= ac_err_mask(sDST);
412 ata_qc_complete(qc);
413 break;
414 default:
415 break;
416 }
417 }
418 }
419 }
420 } while (!sFFE);
421 return handled;
422}
423
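/*
 * Illustrative sketch: the status-FIFO bitfields qs_intr_pkt() above
 * decodes, pulled out as helpers.  Field positions are read from the
 * code; names follow the driver's sEVLD/sFFE/sDST/sHST convention.
 */
#include <stdint.h>

static inline int sff_valid(uint32_t sff1)	{ return (sff1 >> 30) & 1; }
static inline int sff_empty(uint32_t sff1)	{ return sff1 >> 31; }
static inline unsigned int sff_port(uint32_t sff1) { return (sff1 >> 8) & 3; }
static inline uint8_t sff_host_stat(uint32_t sff1) { return sff1 & 0x3f; }
/* truncation to the low byte of bits 23:16 matches the driver's u8 */
static inline uint8_t sff_dev_stat(uint32_t sff0)  { return sff0 >> 16; }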
424static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
425{
426 unsigned int handled = 0, port_no;
427
428 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
429 struct ata_port *ap;
430 ap = host_set->ports[port_no];
431 if (ap &&
432 !(ap->flags & ATA_FLAG_DISABLED)) {
433 struct ata_queued_cmd *qc;
434 struct qs_port_priv *pp = ap->private_data;
435 if (!pp || pp->state != qs_state_mmio)
436 continue;
437 qc = ata_qc_from_tag(ap, ap->active_tag);
438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
439
440 /* check main status, clearing INTRQ */
441 u8 status = ata_check_status(ap);
442 if ((status & ATA_BUSY))
443 continue;
444 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
445 ap->id, qc->tf.protocol, status);
446
447 /* complete taskfile transaction */
448 pp->state = qs_state_idle;
449 qc->err_mask |= ac_err_mask(status);
450 ata_qc_complete(qc);
451 handled = 1;
452 }
453 }
454 }
455 return handled;
456}
457
458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
459{
460 struct ata_host_set *host_set = dev_instance;
461 unsigned int handled = 0;
462
463 VPRINTK("ENTER\n");
464
465 spin_lock(&host_set->lock);
466 handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
467 spin_unlock(&host_set->lock);
468
469 VPRINTK("EXIT\n");
470
471 return IRQ_RETVAL(handled);
472}
473
474static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
475{
476 port->cmd_addr =
477 port->data_addr = base + 0x400;
478 port->error_addr =
479 port->feature_addr = base + 0x408; /* hob_feature = 0x409 */
480 port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */
481 port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */
482 port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */
483 port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */
484 port->device_addr = base + 0x430;
485 port->status_addr =
486 port->command_addr = base + 0x438;
487 port->altstatus_addr =
488 port->ctl_addr = base + 0x440;
489 port->scr_addr = base + 0xc00;
490}
491
492static int qs_port_start(struct ata_port *ap)
493{
494 struct device *dev = ap->host_set->dev;
495 struct qs_port_priv *pp;
496 void __iomem *mmio_base = ap->host_set->mmio_base;
497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
498 u64 addr;
499 int rc;
500
501 rc = ata_port_start(ap);
502 if (rc)
503 return rc;
504 qs_enter_reg_mode(ap);
505 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
506 if (!pp) {
507 rc = -ENOMEM;
508 goto err_out;
509 }
510 pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
511 GFP_KERNEL);
512 if (!pp->pkt) {
513 rc = -ENOMEM;
514 goto err_out_kfree;
515 }
516 memset(pp->pkt, 0, QS_PKT_BYTES);
517 ap->private_data = pp;
518
519 addr = (u64)pp->pkt_dma;
520 writel((u32) addr, chan + QS_CCF_CPBA);
521 writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
522 return 0;
523
524err_out_kfree:
525 kfree(pp);
526err_out:
527 ata_port_stop(ap);
528 return rc;
529}
530
531static void qs_port_stop(struct ata_port *ap)
532{
533 struct device *dev = ap->host_set->dev;
534 struct qs_port_priv *pp = ap->private_data;
535
536 if (pp != NULL) {
537 ap->private_data = NULL;
538 if (pp->pkt != NULL)
539 dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
540 pp->pkt_dma);
541 kfree(pp);
542 }
543 ata_port_stop(ap);
544}
545
546static void qs_host_stop(struct ata_host_set *host_set)
547{
548 void __iomem *mmio_base = host_set->mmio_base;
549 struct pci_dev *pdev = to_pci_dev(host_set->dev);
550
551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
553
554 pci_iounmap(pdev, mmio_base);
555}
556
557static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
558{
559 void __iomem *mmio_base = pe->mmio_base;
560 unsigned int port_no;
561
562 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
563 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
564
565 /* reset each channel in turn */
566 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
567 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
568 writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
569 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
570 readb(chan + QS_CCT_CTR0); /* flush */
571 }
572 writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
573
574 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
575 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
576 /* set FIFO depths to same settings as Windows driver */
577 writew(32, chan + QS_CFC_HUFT);
578 writew(32, chan + QS_CFC_HDFT);
579 writew(10, chan + QS_CFC_DUFT);
580 writew( 8, chan + QS_CFC_DDFT);
581 /* set CPB size in bytes, as a power of two */
582 writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
583 }
584 writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
585}
586
587/*
588 * The QStor understands 64-bit buses, and uses 64-bit fields
589 * for DMA pointers regardless of bus width. We just have to
590 * make sure our DMA masks are set appropriately for whatever
591 * bridge lies between us and the QStor, and then the DMA mapping
592 * code will ensure we only ever "see" appropriate buffer addresses.
593 * If we're 32-bit limited somewhere, then our 64-bit fields will
594 * just end up with zeros in the upper 32-bits, without any special
595 * logic required outside of this routine (below).
596 */
597static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
598{
599 u32 bus_info = readl(mmio_base + QS_HID_HPHY);
600 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
601
602 if (have_64bit_bus &&
603 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
604 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
605 if (rc) {
606 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
607 if (rc) {
608 dev_printk(KERN_ERR, &pdev->dev,
609 "64-bit DMA enable failed\n");
610 return rc;
611 }
612 }
613 } else {
614 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
615 if (rc) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "32-bit DMA enable failed\n");
618 return rc;
619 }
620 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
621 if (rc) {
622 dev_printk(KERN_ERR, &pdev->dev,
623 "32-bit consistent DMA enable failed\n");
624 return rc;
625 }
626 }
627 return 0;
628}
629
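/*
 * Illustrative sketch: the mask-selection policy of qs_set_dma_masks()
 * above, minus the PCI plumbing.  Use 64-bit streaming DMA when the
 * bridge reports a 64-bit bus and accepts the mask; fall back to a
 * 32-bit consistent mask if only the consistent mask is refused;
 * otherwise run everything 32-bit.  The struct below is a stand-in for
 * the probed capabilities.
 */
#include <stdbool.h>

struct dma_caps { bool bus64, streaming64_ok, consistent64_ok; };

static void pick_dma_masks(const struct dma_caps *c,
			   int *streaming_bits, int *consistent_bits)
{
	if (c->bus64 && c->streaming64_ok) {
		*streaming_bits = 64;
		*consistent_bits = c->consistent64_ok ? 64 : 32;
	} else {
		*streaming_bits = 32;	/* 32-bit bus or mask refused */
		*consistent_bits = 32;
	}
}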
630static int qs_ata_init_one(struct pci_dev *pdev,
631 const struct pci_device_id *ent)
632{
633 static int printed_version;
634 struct ata_probe_ent *probe_ent = NULL;
635 void __iomem *mmio_base;
636 unsigned int board_idx = (unsigned int) ent->driver_data;
637 int rc, port_no;
638
639 if (!printed_version++)
640 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
641
642 rc = pci_enable_device(pdev);
643 if (rc)
644 return rc;
645
646 rc = pci_request_regions(pdev, DRV_NAME);
647 if (rc)
648 goto err_out;
649
650 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
651 rc = -ENODEV;
652 goto err_out_regions;
653 }
654
655 mmio_base = pci_iomap(pdev, 4, 0);
656 if (mmio_base == NULL) {
657 rc = -ENOMEM;
658 goto err_out_regions;
659 }
660
661 rc = qs_set_dma_masks(pdev, mmio_base);
662 if (rc)
663 goto err_out_iounmap;
664
665	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
666	if (probe_ent == NULL) {
667		rc = -ENOMEM;
668		goto err_out_iounmap;
669	}
670
672 probe_ent->dev = pci_dev_to_dev(pdev);
673 INIT_LIST_HEAD(&probe_ent->node);
674
675 probe_ent->sht = qs_port_info[board_idx].sht;
676 probe_ent->host_flags = qs_port_info[board_idx].host_flags;
677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
680 probe_ent->port_ops = qs_port_info[board_idx].port_ops;
681
682 probe_ent->irq = pdev->irq;
683 probe_ent->irq_flags = SA_SHIRQ;
684 probe_ent->mmio_base = mmio_base;
685 probe_ent->n_ports = QS_PORTS;
686
687 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
688 unsigned long chan = (unsigned long)mmio_base +
689 (port_no * 0x4000);
690 qs_ata_setup_port(&probe_ent->port[port_no], chan);
691 }
692
693 pci_set_master(pdev);
694
695 /* initialize adapter */
696 qs_host_init(board_idx, probe_ent);
697
698 rc = ata_device_add(probe_ent);
699 kfree(probe_ent);
700 if (rc != QS_PORTS)
701 goto err_out_iounmap;
702 return 0;
703
704err_out_iounmap:
705 pci_iounmap(pdev, mmio_base);
706err_out_regions:
707 pci_release_regions(pdev);
708err_out:
709 pci_disable_device(pdev);
710 return rc;
711}
712
713static int __init qs_ata_init(void)
714{
715 return pci_module_init(&qs_ata_pci_driver);
716}
717
718static void __exit qs_ata_exit(void)
719{
720 pci_unregister_driver(&qs_ata_pci_driver);
721}
722
723MODULE_AUTHOR("Mark Lord");
724MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
725MODULE_LICENSE("GPL");
726MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
727MODULE_VERSION(DRV_VERSION);
728
729module_init(qs_ata_init);
730module_exit(qs_ata_exit);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
deleted file mode 100644
index bc9f918a7f28..000000000000
--- a/drivers/scsi/sata_sil.c
+++ /dev/null
@@ -1,683 +0,0 @@
1/*
2 * sata_sil.c - Silicon Image SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 * Other errata and documentation available under NDA.
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "1.0"
50
51enum {
52 /*
53 * host flags
54 */
55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
56 SIL_FLAG_MOD15WRITE = (1 << 30),
57
58 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
59 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
60
61 /*
62 * Controller IDs
63 */
64 sil_3112 = 0,
65 sil_3512 = 1,
66 sil_3114 = 2,
67
68 /*
69 * Register offsets
70 */
71 SIL_SYSCFG = 0x48,
72
73 /*
74 * Register bits
75 */
76 /* SYSCFG */
77 SIL_MASK_IDE0_INT = (1 << 22),
78 SIL_MASK_IDE1_INT = (1 << 23),
79 SIL_MASK_IDE2_INT = (1 << 24),
80 SIL_MASK_IDE3_INT = (1 << 25),
81 SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
82 SIL_MASK_4PORT = SIL_MASK_2PORT |
83 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
84
85 /* BMDMA/BMDMA2 */
86 SIL_INTR_STEERING = (1 << 1),
87
88 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
89 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
90 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
91 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
92 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
93 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
94 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
95 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
96 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
97 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
98
99 /* SIEN */
100 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
101
102 /*
103 * Others
104 */
105 SIL_QUIRK_MOD15WRITE = (1 << 0),
106 SIL_QUIRK_UDMA5MAX = (1 << 1),
107};
108
109static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
110static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
111static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
112static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
113static void sil_post_set_mode (struct ata_port *ap);
114static irqreturn_t sil_interrupt(int irq, void *dev_instance,
115 struct pt_regs *regs);
116static void sil_freeze(struct ata_port *ap);
117static void sil_thaw(struct ata_port *ap);
118
119
120static const struct pci_device_id sil_pci_tbl[] = {
121 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
122 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
123 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
124 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
125 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
126 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
127 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
128 { } /* terminate list */
129};
130
131
132/* TODO firmware versions should be added - eric */
133static const struct sil_drivelist {
134 const char * product;
135 unsigned int quirk;
136} sil_blacklist [] = {
137 { "ST320012AS", SIL_QUIRK_MOD15WRITE },
138 { "ST330013AS", SIL_QUIRK_MOD15WRITE },
139 { "ST340017AS", SIL_QUIRK_MOD15WRITE },
140 { "ST360015AS", SIL_QUIRK_MOD15WRITE },
141 { "ST380013AS", SIL_QUIRK_MOD15WRITE },
142 { "ST380023AS", SIL_QUIRK_MOD15WRITE },
143 { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
144 { "ST3160023AS", SIL_QUIRK_MOD15WRITE },
145 { "ST3120026AS", SIL_QUIRK_MOD15WRITE },
146 { "ST3200822AS", SIL_QUIRK_MOD15WRITE },
147 { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
148 { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
149 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
150 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
151 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
152 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
153 { }
154};
155
156static struct pci_driver sil_pci_driver = {
157 .name = DRV_NAME,
158 .id_table = sil_pci_tbl,
159 .probe = sil_init_one,
160 .remove = ata_pci_remove_one,
161};
162
163static struct scsi_host_template sil_sht = {
164 .module = THIS_MODULE,
165 .name = DRV_NAME,
166 .ioctl = ata_scsi_ioctl,
167 .queuecommand = ata_scsi_queuecmd,
168 .can_queue = ATA_DEF_QUEUE,
169 .this_id = ATA_SHT_THIS_ID,
170 .sg_tablesize = LIBATA_MAX_PRD,
171 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
172 .emulated = ATA_SHT_EMULATED,
173 .use_clustering = ATA_SHT_USE_CLUSTERING,
174 .proc_name = DRV_NAME,
175 .dma_boundary = ATA_DMA_BOUNDARY,
176 .slave_configure = ata_scsi_slave_config,
177 .slave_destroy = ata_scsi_slave_destroy,
178 .bios_param = ata_std_bios_param,
179};
180
181static const struct ata_port_operations sil_ops = {
182 .port_disable = ata_port_disable,
183 .dev_config = sil_dev_config,
184 .tf_load = ata_tf_load,
185 .tf_read = ata_tf_read,
186 .check_status = ata_check_status,
187 .exec_command = ata_exec_command,
188 .dev_select = ata_std_dev_select,
189 .post_set_mode = sil_post_set_mode,
190 .bmdma_setup = ata_bmdma_setup,
191 .bmdma_start = ata_bmdma_start,
192 .bmdma_stop = ata_bmdma_stop,
193 .bmdma_status = ata_bmdma_status,
194 .qc_prep = ata_qc_prep,
195 .qc_issue = ata_qc_issue_prot,
196 .data_xfer = ata_mmio_data_xfer,
197 .freeze = sil_freeze,
198 .thaw = sil_thaw,
199 .error_handler = ata_bmdma_error_handler,
200 .post_internal_cmd = ata_bmdma_post_internal_cmd,
201 .irq_handler = sil_interrupt,
202 .irq_clear = ata_bmdma_irq_clear,
203 .scr_read = sil_scr_read,
204 .scr_write = sil_scr_write,
205 .port_start = ata_port_start,
206 .port_stop = ata_port_stop,
207 .host_stop = ata_pci_host_stop,
208};
209
210static const struct ata_port_info sil_port_info[] = {
211 /* sil_3112 */
212 {
213 .sht = &sil_sht,
214 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
215 .pio_mask = 0x1f, /* pio0-4 */
216 .mwdma_mask = 0x07, /* mwdma0-2 */
217 .udma_mask = 0x3f, /* udma0-5 */
218 .port_ops = &sil_ops,
219 },
220 /* sil_3512 */
221 {
222 .sht = &sil_sht,
223 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
224 .pio_mask = 0x1f, /* pio0-4 */
225 .mwdma_mask = 0x07, /* mwdma0-2 */
226 .udma_mask = 0x3f, /* udma0-5 */
227 .port_ops = &sil_ops,
228 },
229 /* sil_3114 */
230 {
231 .sht = &sil_sht,
232 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
233 .pio_mask = 0x1f, /* pio0-4 */
234 .mwdma_mask = 0x07, /* mwdma0-2 */
235 .udma_mask = 0x3f, /* udma0-5 */
236 .port_ops = &sil_ops,
237 },
238};
239
240/* per-port register offsets */
241/* TODO: we can probably calculate rather than use a table */
242static const struct {
243 unsigned long tf; /* ATA taskfile register block */
244 unsigned long ctl; /* ATA control/altstatus register block */
245 unsigned long bmdma; /* DMA register block */
246 unsigned long bmdma2; /* DMA register block #2 */
247 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
248 unsigned long scr; /* SATA control register block */
249 unsigned long sien; /* SATA Interrupt Enable register */
250 unsigned long xfer_mode;/* data transfer mode register */
251 unsigned long sfis_cfg; /* SATA FIS reception config register */
252} sil_port[] = {
253 /* port 0 ... */
254 { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
255 { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
256 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
257 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
258 /* ... port 3 */
259};
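/*
 * A minimal sketch of the calculation hinted at in the TODO above --
 * illustrative only, not part of the driver.  Odd ports sit 0x40 past
 * even ones and ports 2/3 mirror ports 0/1 at +0x200, so e.g. the
 * taskfile offset could plausibly be derived as:
 *
 *	static unsigned long sil_tf_offset(unsigned int port_no)
 *	{
 *		return 0x80 + (port_no & 1) * 0x40 +
 *		       ((port_no & 2) ? 0x200 : 0);
 *	}
 *
 * which yields 0x80, 0xC0, 0x280 and 0x2C0 for ports 0-3, matching the
 * table.  The other blocks follow the same even/odd + 0x200 pattern
 * (ctl = tf + 0xA, sien = scr + 0x48, sfis_cfg = sien + 4).
 */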
260
261MODULE_AUTHOR("Jeff Garzik");
262MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
263MODULE_LICENSE("GPL");
264MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
265MODULE_VERSION(DRV_VERSION);
266
267static int slow_down = 0;
268module_param(slow_down, int, 0444);
269MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
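/*
 * Usage sketch: the workaround can be forced when loading the module,
 *
 *	modprobe sata_sil slow_down=1
 *
 * or, for a built-in driver, via the boot parameter sata_sil.slow_down=1.
 */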
270
271
272static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
273{
274 u8 cache_line = 0;
275 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
276 return cache_line;
277}
278
279static void sil_post_set_mode (struct ata_port *ap)
280{
281 struct ata_host_set *host_set = ap->host_set;
282 struct ata_device *dev;
283 void __iomem *addr =
284 host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
285 u32 tmp, dev_mode[2];
286 unsigned int i;
287
288 for (i = 0; i < 2; i++) {
289 dev = &ap->device[i];
290 if (!ata_dev_enabled(dev))
291 dev_mode[i] = 0; /* PIO0/1/2 */
292 else if (dev->flags & ATA_DFLAG_PIO)
293 dev_mode[i] = 1; /* PIO3/4 */
294 else
295 dev_mode[i] = 3; /* UDMA */
296 /* value 2 indicates MDMA */
297 }
298
299 tmp = readl(addr);
300 tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
301 tmp |= dev_mode[0];
302 tmp |= (dev_mode[1] << 4);
303 writel(tmp, addr);
304 readl(addr); /* flush */
305}
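/*
 * Worked example of the encoding above: with two UDMA-capable drives,
 * dev_mode[] ends up { 3, 3 }, so bits [1:0] and [5:4] of the xfer_mode
 * register are both set to 3, i.e. tmp |= 0x33.  One PIO-forced drive
 * plus one UDMA drive gives { 1, 3 }, i.e. tmp |= 0x31.
 */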
306
307static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
308{
309 unsigned long offset = ap->ioaddr.scr_addr;
310
311 switch (sc_reg) {
312 case SCR_STATUS:
313 return offset + 4;
314 case SCR_ERROR:
315 return offset + 8;
316 case SCR_CONTROL:
317 return offset;
318 default:
319 /* do nothing */
320 break;
321 }
322
323 return 0;
324}
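/*
 * Per the switch above, the per-port SCR block lays out SControl at
 * scr_addr + 0, SStatus at +4 and SError at +8; e.g.
 * sil_scr_read(ap, SCR_STATUS) below boils down to readl(scr_addr + 4).
 */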
325
326static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
327{
328 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
329 if (mmio)
330 return readl(mmio);
331 return 0xffffffffU;
332}
333
334static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
335{
336 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
337 if (mmio)
338 writel(val, mmio);
339}
340
341static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
342{
343 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
344 u8 status;
345
346 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
347 u32 serror;
348
349 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
350 * controllers continue to assert IRQ as long as
351 * SError bits are pending. Clear SError immediately.
352 */
353 serror = sil_scr_read(ap, SCR_ERROR);
354 sil_scr_write(ap, SCR_ERROR, serror);
355
356 /* Trigger hotplug and accumulate SError only if the
357 * port isn't already frozen. Otherwise, PHY events
358 * during hardreset make controllers with broken SIEN
359 * repeat probing needlessly.
360 */
361 if (!(ap->flags & ATA_FLAG_FROZEN)) {
362 ata_ehi_hotplugged(&ap->eh_info);
363 ap->eh_info.serror |= serror;
364 }
365
366 goto freeze;
367 }
368
369 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
370 goto freeze;
371
372 /* Check whether we are expecting interrupt in this state */
373 switch (ap->hsm_task_state) {
374 case HSM_ST_FIRST:
375 /* Some pre-ATAPI-4 devices assert INTRQ
376 * at this state when ready to receive CDB.
377 */
378
379 * Checking the ATA_DFLAG_CDB_INTR flag is enough here.
380 * The flag was turned on only for atapi devices.
381 * No need to check is_atapi_taskfile(&qc->tf) again.
382 */
383 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
384 goto err_hsm;
385 break;
386 case HSM_ST_LAST:
387 if (qc->tf.protocol == ATA_PROT_DMA ||
388 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
389 /* clear DMA-Start bit */
390 ap->ops->bmdma_stop(qc);
391
392 if (bmdma2 & SIL_DMA_ERROR) {
393 qc->err_mask |= AC_ERR_HOST_BUS;
394 ap->hsm_task_state = HSM_ST_ERR;
395 }
396 }
397 break;
398 case HSM_ST:
399 break;
400 default:
401 goto err_hsm;
402 }
403
404 /* check main status, clearing INTRQ */
405 status = ata_chk_status(ap);
406 if (unlikely(status & ATA_BUSY))
407 goto err_hsm;
408
409 /* ack bmdma irq events */
410 ata_bmdma_irq_clear(ap);
411
412 /* kick HSM in the ass */
413 ata_hsm_move(ap, qc, status, 0);
414
415 return;
416
417 err_hsm:
418 qc->err_mask |= AC_ERR_HSM;
419 freeze:
420 ata_port_freeze(ap);
421}
422
423static irqreturn_t sil_interrupt(int irq, void *dev_instance,
424 struct pt_regs *regs)
425{
426 struct ata_host_set *host_set = dev_instance;
427 void __iomem *mmio_base = host_set->mmio_base;
428 int handled = 0;
429 int i;
430
431 spin_lock(&host_set->lock);
432
433 for (i = 0; i < host_set->n_ports; i++) {
434 struct ata_port *ap = host_set->ports[i];
435 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
436
437 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
438 continue;
439
440 if (bmdma2 == 0xffffffff ||
441 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
442 continue;
443
444 sil_host_intr(ap, bmdma2);
445 handled = 1;
446 }
447
448 spin_unlock(&host_set->lock);
449
450 return IRQ_RETVAL(handled);
451}
452
453static void sil_freeze(struct ata_port *ap)
454{
455 void __iomem *mmio_base = ap->host_set->mmio_base;
456 u32 tmp;
457
458 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
459 writel(0, mmio_base + sil_port[ap->port_no].sien);
460
461 /* plug IRQ */
462 tmp = readl(mmio_base + SIL_SYSCFG);
463 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
464 writel(tmp, mmio_base + SIL_SYSCFG);
465 readl(mmio_base + SIL_SYSCFG); /* flush */
466}
467
468static void sil_thaw(struct ata_port *ap)
469{
470 void __iomem *mmio_base = ap->host_set->mmio_base;
471 u32 tmp;
472
473 /* clear IRQ */
474 ata_chk_status(ap);
475 ata_bmdma_irq_clear(ap);
476
477 /* turn on SATA IRQ */
478 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
479
480 /* turn on IRQ */
481 tmp = readl(mmio_base + SIL_SYSCFG);
482 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
483 writel(tmp, mmio_base + SIL_SYSCFG);
484}
485
486/**
487 * sil_dev_config - Apply device/host-specific errata fixups
488 * @ap: Port containing device to be examined
489 * @dev: Device to be examined
490 *
491 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
492 * device is known to be present, this function is called.
493 * We apply two errata fixups, both specific to Silicon Image:
494 * one for certain Seagate drives and one for certain Maxtor drives.
495 *
496 * For certain Seagate devices, we must limit the maximum request
497 * size to 15 sectors (just under 8K).
498 *
499 * For certain Maxtor devices, we must not program the drive
500 * beyond udma5.
501 *
502 * Both fixups are unfairly pessimistic. As soon as I get more
503 * information on these errata, I will create a more exhaustive
504 * list, and apply the fixups to only the specific
505 * devices/hosts/firmwares that need it.
506 *
507 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
508 * The Maxtor quirk is in the blacklist, but I'm keeping the original
509 * pessimistic fix for the following reasons...
510 * - There seems to be less info on it, only one device gleaned off the
511 * Windows driver, maybe only one is affected. More info would be greatly
512 * appreciated.
513 * - But then again UDMA5 is hardly anything to complain about
514 */
515static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
516{
517 unsigned int n, quirks = 0;
518 unsigned char model_num[41];
519
520 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
521
522 for (n = 0; sil_blacklist[n].product; n++)
523 if (!strcmp(sil_blacklist[n].product, model_num)) {
524 quirks = sil_blacklist[n].quirk;
525 break;
526 }
527
528 /* limit requests to 15 sectors */
529 if (slow_down ||
530 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
531 (quirks & SIL_QUIRK_MOD15WRITE))) {
532 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
533 "(mod15write workaround)\n");
534 dev->max_sectors = 15;
535 return;
536 }
537
538 /* limit to udma5 */
539 if (quirks & SIL_QUIRK_UDMA5MAX) {
540 ata_dev_printk(dev, KERN_INFO,
541 "applying Maxtor errata fix %s\n", model_num);
542 dev->udma_mask &= ATA_UDMA5;
543 return;
544 }
545}
546
547static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
548{
549 static int printed_version;
550 struct ata_probe_ent *probe_ent = NULL;
551 unsigned long base;
552 void __iomem *mmio_base;
553 int rc;
554 unsigned int i;
555 int pci_dev_busy = 0;
556 u32 tmp;
557 u8 cls;
558
559 if (!printed_version++)
560 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
561
562 rc = pci_enable_device(pdev);
563 if (rc)
564 return rc;
565
566 rc = pci_request_regions(pdev, DRV_NAME);
567 if (rc) {
568 pci_dev_busy = 1;
569 goto err_out;
570 }
571
572 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
573 if (rc)
574 goto err_out_regions;
575 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
576 if (rc)
577 goto err_out_regions;
578
579 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
580 if (probe_ent == NULL) {
581 rc = -ENOMEM;
582 goto err_out_regions;
583 }
584
585 INIT_LIST_HEAD(&probe_ent->node);
586 probe_ent->dev = pci_dev_to_dev(pdev);
587 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
588 probe_ent->sht = sil_port_info[ent->driver_data].sht;
589 probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
590 probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
591 probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
592 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
593 probe_ent->irq = pdev->irq;
594 probe_ent->irq_flags = SA_SHIRQ;
595 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
596
597 mmio_base = pci_iomap(pdev, 5, 0);
598 if (mmio_base == NULL) {
599 rc = -ENOMEM;
600 goto err_out_free_ent;
601 }
602
603 probe_ent->mmio_base = mmio_base;
604
605 base = (unsigned long) mmio_base;
606
607 for (i = 0; i < probe_ent->n_ports; i++) {
608 probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
609 probe_ent->port[i].altstatus_addr =
610 probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
611 probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
612 probe_ent->port[i].scr_addr = base + sil_port[i].scr;
613 ata_std_ports(&probe_ent->port[i]);
614 }
615
616 /* Initialize FIFO PCI bus arbitration */
617 cls = sil_get_device_cache_line(pdev);
618 if (cls) {
619 cls >>= 3;
620 cls++; /* cls = (line_size/8)+1 */
621 for (i = 0; i < probe_ent->n_ports; i++)
622 writew(cls << 8 | cls,
623 mmio_base + sil_port[i].fifo_cfg);
624 } else
625 dev_printk(KERN_WARNING, &pdev->dev,
626 "cache line size not set. Driver may not function\n");
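		/*
		 * Worked example of the math above: a PCI cache line
		 * size of 16 dwords (64 bytes) gives cls = (16 >> 3) + 1
		 * = 3, so 0x0303 is written to each port's fifo_cfg.
		 */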
627
628 /* Apply R_ERR on DMA activate FIS errata workaround */
629 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
630 int cnt;
631
632 for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
633 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
634 if ((tmp & 0x3) != 0x01)
635 continue;
636 if (!cnt)
637 dev_printk(KERN_INFO, &pdev->dev,
638 "Applying R_ERR on DMA activate "
639 "FIS errata fix\n");
640 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
641 cnt++;
642 }
643 }
644
645 if (ent->driver_data == sil_3114) {
646 /* flip the magic "make 4 ports work" bit */
647 tmp = readl(mmio_base + sil_port[2].bmdma);
648 if ((tmp & SIL_INTR_STEERING) == 0)
649 writel(tmp | SIL_INTR_STEERING,
650 mmio_base + sil_port[2].bmdma);
651 }
652
653 pci_set_master(pdev);
654
655 /* FIXME: check ata_device_add return value */
656 ata_device_add(probe_ent);
657 kfree(probe_ent);
658
659 return 0;
660
661err_out_free_ent:
662 kfree(probe_ent);
663err_out_regions:
664 pci_release_regions(pdev);
665err_out:
666 if (!pci_dev_busy)
667 pci_disable_device(pdev);
668 return rc;
669}
670
671static int __init sil_init(void)
672{
673 return pci_module_init(&sil_pci_driver);
674}
675
676static void __exit sil_exit(void)
677{
678 pci_unregister_driver(&sil_pci_driver);
679}
680
681
682module_init(sil_init);
683module_exit(sil_exit);
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
deleted file mode 100644
index c8b477c67247..000000000000
--- a/drivers/scsi/sata_sil24.c
+++ /dev/null
@@ -1,1181 +0,0 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h>
31#include <asm/io.h>
32
33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.24"
35
36/*
37 * Port request block (PRB) 32 bytes
38 */
39struct sil24_prb {
40 __le16 ctrl;
41 __le16 prot;
42 __le32 rx_cnt;
43 u8 fis[6 * 4];
44};
45
46/*
47 * Scatter gather entry (SGE) 16 bytes
48 */
49struct sil24_sge {
50 __le64 addr;
51 __le32 cnt;
52 __le32 flags;
53};
54
55/*
56 * Port multiplier
57 */
58struct sil24_port_multiplier {
59 __le32 diag;
60 __le32 sactive;
61};
62
63enum {
64 /*
65 * Global controller registers (128 bytes @ BAR0)
66 */
67 /* 32 bit regs */
68 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
69 HOST_CTRL = 0x40,
70 HOST_IRQ_STAT = 0x44,
71 HOST_PHY_CFG = 0x48,
72 HOST_BIST_CTRL = 0x50,
73 HOST_BIST_PTRN = 0x54,
74 HOST_BIST_STAT = 0x58,
75 HOST_MEM_BIST_STAT = 0x5c,
76 HOST_FLASH_CMD = 0x70,
77 /* 8 bit regs */
78 HOST_FLASH_DATA = 0x74,
79 HOST_TRANSITION_DETECT = 0x75,
80 HOST_GPIO_CTRL = 0x76,
81 HOST_I2C_ADDR = 0x78, /* 32 bit */
82 HOST_I2C_DATA = 0x7c,
83 HOST_I2C_XFER_CNT = 0x7e,
84 HOST_I2C_CTRL = 0x7f,
85
86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31),
88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95
96 /*
97 * Port registers
98 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
99 */
100 PORT_REGS_SIZE = 0x2000,
101
102 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
103 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
104
105 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
106 /* 32 bit regs */
107 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
108 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
109 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
110 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
111 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
112 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
113 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
114 PORT_CMD_ERR = 0x1024, /* command error number */
115 PORT_FIS_CFG = 0x1028,
116 PORT_FIFO_THRES = 0x102c,
117 /* 16 bit regs */
118 PORT_DECODE_ERR_CNT = 0x1040,
119 PORT_DECODE_ERR_THRESH = 0x1042,
120 PORT_CRC_ERR_CNT = 0x1044,
121 PORT_CRC_ERR_THRESH = 0x1046,
122 PORT_HSHK_ERR_CNT = 0x1048,
123 PORT_HSHK_ERR_THRESH = 0x104a,
124 /* 32 bit regs */
125 PORT_PHY_CFG = 0x1050,
126 PORT_SLOT_STAT = 0x1800,
127 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
128 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
129 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
130 PORT_SCONTROL = 0x1f00,
131 PORT_SSTATUS = 0x1f04,
132 PORT_SERROR = 0x1f08,
133 PORT_SACTIVE = 0x1f0c,
134
135 /* PORT_CTRL_STAT bits */
136 PORT_CS_PORT_RST = (1 << 0), /* port reset */
137 PORT_CS_DEV_RST = (1 << 1), /* device reset */
138 PORT_CS_INIT = (1 << 2), /* port initialize */
139 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
140 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
141 PORT_CS_RESUME = (1 << 6), /* port resume */
142 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
143 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
144 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
145
146 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
147 /* bits[11:0] are masked */
148 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
149 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
150 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
151 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
152 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
153 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
154 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
155 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
156 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
157 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
158 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
159 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
160
161 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
162 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
163 PORT_IRQ_UNK_FIS,
164
165 /* bits[27:16] are unmasked (raw) */
166 PORT_IRQ_RAW_SHIFT = 16,
167 PORT_IRQ_MASKED_MASK = 0x7ff,
168 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
169
170 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
171 PORT_IRQ_STEER_SHIFT = 30,
172 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
173
174 /* PORT_CMD_ERR constants */
175 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
176 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
177 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
178 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
179 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
180 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
181 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
182 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
183 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
184 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
185 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
186 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
187 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
188 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
189 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
190 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
191 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
192 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
193 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
194 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
195 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
196 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
197
198 /* bits of PRB control field */
199 PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
200 PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
201 PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
202 PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
203 PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
204
205 /* PRB protocol field */
206 PRB_PROT_PACKET = (1 << 0),
207 PRB_PROT_TCQ = (1 << 1),
208 PRB_PROT_NCQ = (1 << 2),
209 PRB_PROT_READ = (1 << 3),
210 PRB_PROT_WRITE = (1 << 4),
211 PRB_PROT_TRANSPARENT = (1 << 5),
212
213 /*
214 * Other constants
215 */
216 SGE_TRM = (1 << 31), /* Last SGE in chain */
217 SGE_LNK = (1 << 30), /* linked list
218 Points to SGT, not SGE */
219 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
220 data address ignored */
221
222 SIL24_MAX_CMDS = 31,
223
224 /* board id */
225 BID_SIL3124 = 0,
226 BID_SIL3132 = 1,
227 BID_SIL3131 = 2,
228
229 /* host flags */
230 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
231 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
232 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
233 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
234
235 IRQ_STAT_4PORTS = 0xf,
236};
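/*
 * Decoding sketch (illustrative, not driver code): given the layout
 * above, the masked and raw halves of PORT_IRQ_STAT split as
 *
 *	masked = stat & PORT_IRQ_MASKED_MASK;
 *	raw    = (stat >> PORT_IRQ_RAW_SHIFT) & PORT_IRQ_MASKED_MASK;
 *
 * sil24_softreset() below relies on the same layout when it shifts the
 * status down by PORT_IRQ_RAW_SHIFT before testing the completion bits.
 */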
237
238struct sil24_ata_block {
239 struct sil24_prb prb;
240 struct sil24_sge sge[LIBATA_MAX_PRD];
241};
242
243struct sil24_atapi_block {
244 struct sil24_prb prb;
245 u8 cdb[16];
246 struct sil24_sge sge[LIBATA_MAX_PRD - 1];
247};
248
249union sil24_cmd_block {
250 struct sil24_ata_block ata;
251 struct sil24_atapi_block atapi;
252};
253
254static struct sil24_cerr_info {
255 unsigned int err_mask, action;
256 const char *desc;
257} sil24_cerr_db[] = {
258 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
259 "device error" },
260 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
261 "device error via D2H FIS" },
262 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
263 "device error via SDB FIS" },
264 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
265 "error in data FIS" },
266 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
267 "failed to transmit command FIS" },
268 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
269 "protocol mismatch" },
270 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
271 "data direction mismatch" },
272 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
273 "ran out of SGEs while writing" },
274 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
275 "ran out of SGEs while reading" },
276 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
277 "invalid data direction for ATAPI CDB" },
278 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
279 "SGT not on qword boundary" },
280 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
281 "PCI target abort while fetching SGT" },
282 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
283 "PCI master abort while fetching SGT" },
284 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
285 "PCI parity error while fetching SGT" },
286 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
287 "PRB not on qword boundary" },
288 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
289 "PCI target abort while fetching PRB" },
290 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
291 "PCI master abort while fetching PRB" },
292 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
293 "PCI parity error while fetching PRB" },
294 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
295 "undefined error while transferring data" },
296 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
297 "PCI target abort while transferring data" },
298 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
299 "PCI master abort while transferring data" },
300 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
301 "PCI parity error while transferring data" },
302 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
303 "FIS received while sending service FIS" },
304};
305
306/*
307 * ap->private_data
308 *
309 * The preview driver always returned 0 for status. We emulate it
310 * here from the previous interrupt.
311 */
312struct sil24_port_priv {
313 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
314 dma_addr_t cmd_block_dma; /* DMA base addr for them */
315 struct ata_taskfile tf; /* Cached taskfile registers */
316};
317
318/* ap->host_set->private_data */
319struct sil24_host_priv {
320 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
321 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
322};
323
324static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
325static u8 sil24_check_status(struct ata_port *ap);
326static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
327static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
328static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
329static void sil24_qc_prep(struct ata_queued_cmd *qc);
330static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
331static void sil24_irq_clear(struct ata_port *ap);
332static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
333static void sil24_freeze(struct ata_port *ap);
334static void sil24_thaw(struct ata_port *ap);
335static void sil24_error_handler(struct ata_port *ap);
336static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
337static int sil24_port_start(struct ata_port *ap);
338static void sil24_port_stop(struct ata_port *ap);
339static void sil24_host_stop(struct ata_host_set *host_set);
340static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
341
342static const struct pci_device_id sil24_pci_tbl[] = {
343 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
344 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
345 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
346 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
347 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
348 { } /* terminate list */
349};
350
351static struct pci_driver sil24_pci_driver = {
352 .name = DRV_NAME,
353 .id_table = sil24_pci_tbl,
354 .probe = sil24_init_one,
355 .remove = ata_pci_remove_one, /* safe? */
356};
357
358static struct scsi_host_template sil24_sht = {
359 .module = THIS_MODULE,
360 .name = DRV_NAME,
361 .ioctl = ata_scsi_ioctl,
362 .queuecommand = ata_scsi_queuecmd,
363 .change_queue_depth = ata_scsi_change_queue_depth,
364 .can_queue = SIL24_MAX_CMDS,
365 .this_id = ATA_SHT_THIS_ID,
366 .sg_tablesize = LIBATA_MAX_PRD,
367 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
368 .emulated = ATA_SHT_EMULATED,
369 .use_clustering = ATA_SHT_USE_CLUSTERING,
370 .proc_name = DRV_NAME,
371 .dma_boundary = ATA_DMA_BOUNDARY,
372 .slave_configure = ata_scsi_slave_config,
373 .slave_destroy = ata_scsi_slave_destroy,
374 .bios_param = ata_std_bios_param,
375};
376
377static const struct ata_port_operations sil24_ops = {
378 .port_disable = ata_port_disable,
379
380 .dev_config = sil24_dev_config,
381
382 .check_status = sil24_check_status,
383 .check_altstatus = sil24_check_status,
384 .dev_select = ata_noop_dev_select,
385
386 .tf_read = sil24_tf_read,
387
388 .qc_prep = sil24_qc_prep,
389 .qc_issue = sil24_qc_issue,
390
391 .irq_handler = sil24_interrupt,
392 .irq_clear = sil24_irq_clear,
393
394 .scr_read = sil24_scr_read,
395 .scr_write = sil24_scr_write,
396
397 .freeze = sil24_freeze,
398 .thaw = sil24_thaw,
399 .error_handler = sil24_error_handler,
400 .post_internal_cmd = sil24_post_internal_cmd,
401
402 .port_start = sil24_port_start,
403 .port_stop = sil24_port_stop,
404 .host_stop = sil24_host_stop,
405};
406
407/*
408 * Use bits 30-31 of host_flags to encode available port numbers.
409 * Current maximum is 4.
410 */
411#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
412#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
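/*
 * Worked example: SIL24_NPORTS2FLAG(4) encodes (4 - 1) & 0x3 = 3 into
 * bits 30-31, and SIL24_FLAG2NPORTS() decodes it back to 3 + 1 = 4.
 */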
413
414static struct ata_port_info sil24_port_info[] = {
415 /* sil_3124 */
416 {
417 .sht = &sil24_sht,
418 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
419 SIL24_FLAG_PCIX_IRQ_WOC,
420 .pio_mask = 0x1f, /* pio0-4 */
421 .mwdma_mask = 0x07, /* mwdma0-2 */
422 .udma_mask = 0x3f, /* udma0-5 */
423 .port_ops = &sil24_ops,
424 },
425 /* sil_3132 */
426 {
427 .sht = &sil24_sht,
428 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
429 .pio_mask = 0x1f, /* pio0-4 */
430 .mwdma_mask = 0x07, /* mwdma0-2 */
431 .udma_mask = 0x3f, /* udma0-5 */
432 .port_ops = &sil24_ops,
433 },
434 /* sil_3131/sil_3531 */
435 {
436 .sht = &sil24_sht,
437 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
438 .pio_mask = 0x1f, /* pio0-4 */
439 .mwdma_mask = 0x07, /* mwdma0-2 */
440 .udma_mask = 0x3f, /* udma0-5 */
441 .port_ops = &sil24_ops,
442 },
443};
444
445static int sil24_tag(int tag)
446{
447 if (unlikely(ata_tag_internal(tag)))
448 return 0;
449 return tag;
450}
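/*
 * E.g. tags 0..30 map to themselves, while libata's internal tag is
 * folded onto hardware command slot 0.  (That this never collides with
 * a normal command is an assumption noted here, not something the
 * helper enforces.)
 */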
451
452static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
453{
454 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
455
456 if (dev->cdb_len == 16)
457 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
458 else
459 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
460}
461
462static inline void sil24_update_tf(struct ata_port *ap)
463{
464 struct sil24_port_priv *pp = ap->private_data;
465 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
466 struct sil24_prb __iomem *prb = port;
467 u8 fis[6 * 4];
468
469 memcpy_fromio(fis, prb->fis, 6 * 4);
470 ata_tf_from_fis(fis, &pp->tf);
471}
472
473static u8 sil24_check_status(struct ata_port *ap)
474{
475 struct sil24_port_priv *pp = ap->private_data;
476 return pp->tf.command;
477}
478
479static int sil24_scr_map[] = {
480 [SCR_CONTROL] = 0,
481 [SCR_STATUS] = 1,
482 [SCR_ERROR] = 2,
483 [SCR_ACTIVE] = 3,
484};
485
486static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
487{
488 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
489 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
490 void __iomem *addr;
491 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
492 return readl(addr);
493 }
494 return 0xffffffffU;
495}
496
497static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
498{
499 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
500 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
501 void __iomem *addr;
502 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
503 writel(val, addr);
504 }
505}
506
507static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
508{
509 struct sil24_port_priv *pp = ap->private_data;
510 *tf = pp->tf;
511}
512
513static int sil24_init_port(struct ata_port *ap)
514{
515 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
516 u32 tmp;
517
518 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
519 ata_wait_register(port + PORT_CTRL_STAT,
520 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
521 tmp = ata_wait_register(port + PORT_CTRL_STAT,
522 PORT_CS_RDY, 0, 10, 100);
523
524 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
525 return -EIO;
526 return 0;
527}
528
529static int sil24_softreset(struct ata_port *ap, unsigned int *class)
530{
531 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
532 struct sil24_port_priv *pp = ap->private_data;
533 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
534 dma_addr_t paddr = pp->cmd_block_dma;
535 u32 mask, irq_stat;
536 const char *reason;
537
538 DPRINTK("ENTER\n");
539
540 if (ata_port_offline(ap)) {
541 DPRINTK("PHY reports no device\n");
542 *class = ATA_DEV_NONE;
543 goto out;
544 }
545
546 /* put the port into known state */
547 if (sil24_init_port(ap)) {
548 reason = "port not ready";
549 goto err;
550 }
551
552 /* do SRST */
553 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
554 prb->fis[1] = 0; /* no PM yet */
555
556 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
557 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
558
559 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
560 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
561 100, ATA_TMOUT_BOOT / HZ * 1000);
562
563 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
564 irq_stat >>= PORT_IRQ_RAW_SHIFT;
565
566 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
567 if (irq_stat & PORT_IRQ_ERROR)
568 reason = "SRST command error";
569 else
570 reason = "timeout";
571 goto err;
572 }
573
574 sil24_update_tf(ap);
575 *class = ata_dev_classify(&pp->tf);
576
577 if (*class == ATA_DEV_UNKNOWN)
578 *class = ATA_DEV_NONE;
579
580 out:
581 DPRINTK("EXIT, class=%u\n", *class);
582 return 0;
583
584 err:
585 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
586 return -EIO;
587}
588
589static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
590{
591 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
592 const char *reason;
593 int tout_msec, rc;
594 u32 tmp;
595
596 /* sil24 does the right thing(tm) without any protection */
597 sata_set_spd(ap);
598
599 tout_msec = 100;
600 if (ata_port_online(ap))
601 tout_msec = 5000;
602
603 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
604 tmp = ata_wait_register(port + PORT_CTRL_STAT,
605 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
606
607 /* SStatus oscillates between zero and valid status after
608 * DEV_RST, debounce it.
609 */
610 rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst);
611 if (rc) {
612 reason = "PHY debouncing failed";
613 goto err;
614 }
615
616 if (tmp & PORT_CS_DEV_RST) {
617 if (ata_port_offline(ap))
618 return 0;
619 reason = "link not ready";
620 goto err;
621 }
622
623 /* Sil24 doesn't store signature FIS after hardreset, so we
624 * can't wait for BSY to clear. Some devices take a long time
625 * to get ready and those devices will choke if we don't wait
626 * for BSY clearance here. Tell libata to perform follow-up
627 * softreset.
628 */
629 return -EAGAIN;
630
631 err:
632 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
633 return -EIO;
634}
635
636static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
637 struct sil24_sge *sge)
638{
639 struct scatterlist *sg;
640 unsigned int idx = 0;
641
642 ata_for_each_sg(sg, qc) {
643 sge->addr = cpu_to_le64(sg_dma_address(sg));
644 sge->cnt = cpu_to_le32(sg_dma_len(sg));
645 if (ata_sg_is_last(sg, qc))
646 sge->flags = cpu_to_le32(SGE_TRM);
647 else
648 sge->flags = 0;
649
650 sge++;
651 idx++;
652 }
653}
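/*
 * Note: only SGE_TRM is ever set here; the terminating entry tells the
 * controller to stop walking the scatterlist.  SGE_LNK and SGE_DRD from
 * the enum above are defined but unused by this driver.
 */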
654
655static void sil24_qc_prep(struct ata_queued_cmd *qc)
656{
657 struct ata_port *ap = qc->ap;
658 struct sil24_port_priv *pp = ap->private_data;
659 union sil24_cmd_block *cb;
660 struct sil24_prb *prb;
661 struct sil24_sge *sge;
662 u16 ctrl = 0;
663
664 cb = &pp->cmd_block[sil24_tag(qc->tag)];
665
666 switch (qc->tf.protocol) {
667 case ATA_PROT_PIO:
668 case ATA_PROT_DMA:
669 case ATA_PROT_NCQ:
670 case ATA_PROT_NODATA:
671 prb = &cb->ata.prb;
672 sge = cb->ata.sge;
673 break;
674
675 case ATA_PROT_ATAPI:
676 case ATA_PROT_ATAPI_DMA:
677 case ATA_PROT_ATAPI_NODATA:
678 prb = &cb->atapi.prb;
679 sge = cb->atapi.sge;
680 memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb)); /* cdb[] is 16 bytes */
681 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
682
683 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
684 if (qc->tf.flags & ATA_TFLAG_WRITE)
685 ctrl = PRB_CTRL_PACKET_WRITE;
686 else
687 ctrl = PRB_CTRL_PACKET_READ;
688 }
689 break;
690
691 default:
692 prb = NULL; /* shut up, gcc */
693 sge = NULL;
694 BUG();
695 }
696
697 prb->ctrl = cpu_to_le16(ctrl);
698 ata_tf_to_fis(&qc->tf, prb->fis, 0);
699
700 if (qc->flags & ATA_QCFLAG_DMAMAP)
701 sil24_fill_sg(qc, sge);
702}
703
704static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
705{
706 struct ata_port *ap = qc->ap;
707 struct sil24_port_priv *pp = ap->private_data;
708 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
709 unsigned int tag = sil24_tag(qc->tag);
710 dma_addr_t paddr;
711 void __iomem *activate;
712
713 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
714 activate = port + PORT_CMD_ACTIVATE + tag * 8;
715
716 writel((u32)paddr, activate);
717 writel((u64)paddr >> 32, activate + 4);
718
719 return 0;
720}
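/*
 * Worked example: for tag 3 the command block sits at cmd_block_dma +
 * 3 * sizeof(union sil24_cmd_block), and its 64-bit bus address is
 * written to PORT_CMD_ACTIVATE + 24 (low dword first, then the high
 * dword), which kicks off execution of that slot.
 */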
721
722static void sil24_irq_clear(struct ata_port *ap)
723{
724 /* unused */
725}
726
727static void sil24_freeze(struct ata_port *ap)
728{
729 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
730
731 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
732 * PORT_IRQ_ENABLE instead.
733 */
734 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
735}
736
737static void sil24_thaw(struct ata_port *ap)
738{
739 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
740 u32 tmp;
741
742 /* clear IRQ */
743 tmp = readl(port + PORT_IRQ_STAT);
744 writel(tmp, port + PORT_IRQ_STAT);
745
746 /* turn IRQ back on */
747 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
748}
749
750static void sil24_error_intr(struct ata_port *ap)
751{
752 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
753 struct ata_eh_info *ehi = &ap->eh_info;
754 int freeze = 0;
755 u32 irq_stat;
756
757 /* on error, we need to clear IRQ explicitly */
758 irq_stat = readl(port + PORT_IRQ_STAT);
759 writel(irq_stat, port + PORT_IRQ_STAT);
760
761 /* first, analyze and record host port events */
762 ata_ehi_clear_desc(ehi);
763
764 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
765
766 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
767 ata_ehi_hotplugged(ehi);
768 ata_ehi_push_desc(ehi, ", %s",
769 irq_stat & PORT_IRQ_PHYRDY_CHG ?
770 "PHY RDY changed" : "device exchanged");
771 freeze = 1;
772 }
773
774 if (irq_stat & PORT_IRQ_UNK_FIS) {
775 ehi->err_mask |= AC_ERR_HSM;
776 ehi->action |= ATA_EH_SOFTRESET;
777 ata_ehi_push_desc(ehi, ", unknown FIS");
778 freeze = 1;
779 }
780
781 /* deal with command error */
782 if (irq_stat & PORT_IRQ_ERROR) {
783 struct sil24_cerr_info *ci = NULL;
784 unsigned int err_mask = 0, action = 0;
785 struct ata_queued_cmd *qc;
786 u32 cerr;
787
788 /* analyze CMD_ERR */
789 cerr = readl(port + PORT_CMD_ERR);
790 if (cerr < ARRAY_SIZE(sil24_cerr_db))
791 ci = &sil24_cerr_db[cerr];
792
793 if (ci && ci->desc) {
794 err_mask |= ci->err_mask;
795 action |= ci->action;
796 ata_ehi_push_desc(ehi, ", %s", ci->desc);
797 } else {
798 err_mask |= AC_ERR_OTHER;
799 action |= ATA_EH_SOFTRESET;
800 ata_ehi_push_desc(ehi, ", unknown command error %d",
801 cerr);
802 }
803
804 /* record error info */
805 qc = ata_qc_from_tag(ap, ap->active_tag);
806 if (qc) {
807 sil24_update_tf(ap);
808 qc->err_mask |= err_mask;
809 } else
810 ehi->err_mask |= err_mask;
811
812 ehi->action |= action;
813 }
814
815 /* freeze or abort */
816 if (freeze)
817 ata_port_freeze(ap);
818 else
819 ata_port_abort(ap);
820}
821
822static void sil24_finish_qc(struct ata_queued_cmd *qc)
823{
824 if (qc->flags & ATA_QCFLAG_RESULT_TF)
825 sil24_update_tf(qc->ap);
826}
827
828static inline void sil24_host_intr(struct ata_port *ap)
829{
830 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
831 u32 slot_stat, qc_active;
832 int rc;
833
834 slot_stat = readl(port + PORT_SLOT_STAT);
835
836 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
837 sil24_error_intr(ap);
838 return;
839 }
840
841 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
842 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
843
844 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
845 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
846 if (rc > 0)
847 return;
848 if (rc < 0) {
849 struct ata_eh_info *ehi = &ap->eh_info;
850 ehi->err_mask |= AC_ERR_HSM;
851 ehi->action |= ATA_EH_SOFTRESET;
852 ata_port_freeze(ap);
853 return;
854 }
855
856 if (ata_ratelimit())
857 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
858 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
859 slot_stat, ap->active_tag, ap->sactive);
860}
861
862static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
863{
864 struct ata_host_set *host_set = dev_instance;
865 struct sil24_host_priv *hpriv = host_set->private_data;
866 unsigned handled = 0;
867 u32 status;
868 int i;
869
870 status = readl(hpriv->host_base + HOST_IRQ_STAT);
871
872 if (status == 0xffffffff) {
873 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
874 "PCI fault or device removal?\n");
875 goto out;
876 }
877
878 if (!(status & IRQ_STAT_4PORTS))
879 goto out;
880
881 spin_lock(&host_set->lock);
882
883 for (i = 0; i < host_set->n_ports; i++)
884 if (status & (1 << i)) {
885 struct ata_port *ap = host_set->ports[i];
886 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
887 sil24_host_intr(ap);
888 handled++;
889 } else
890 printk(KERN_ERR DRV_NAME
891 ": interrupt from disabled port %d\n", i);
892 }
893
894 spin_unlock(&host_set->lock);
895 out:
896 return IRQ_RETVAL(handled);
897}
898
899static void sil24_error_handler(struct ata_port *ap)
900{
901 struct ata_eh_context *ehc = &ap->eh_context;
902
903 if (sil24_init_port(ap)) {
904 ata_eh_freeze_port(ap);
905 ehc->i.action |= ATA_EH_HARDRESET;
906 }
907
908 /* perform recovery */
909 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
910 ata_std_postreset);
911}
912
913static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
914{
915 struct ata_port *ap = qc->ap;
916
917 if (qc->flags & ATA_QCFLAG_FAILED)
918 qc->err_mask |= AC_ERR_OTHER;
919
920 /* make DMA engine forget about the failed command */
921 if (qc->err_mask)
922 sil24_init_port(ap);
923}
924
925static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
926{
927 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
928
929 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
930}
931
932static int sil24_port_start(struct ata_port *ap)
933{
934 struct device *dev = ap->host_set->dev;
935 struct sil24_port_priv *pp;
936 union sil24_cmd_block *cb;
937 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
938 dma_addr_t cb_dma;
939 int rc = -ENOMEM;
940
941 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
942 if (!pp)
943 goto err_out;
944
945 pp->tf.command = ATA_DRDY;
946
947 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
948 if (!cb)
949 goto err_out_pp;
950 memset(cb, 0, cb_size);
951
952 rc = ata_pad_alloc(ap, dev);
953 if (rc)
954 goto err_out_pad;
955
956 pp->cmd_block = cb;
957 pp->cmd_block_dma = cb_dma;
958
959 ap->private_data = pp;
960
961 return 0;
962
963err_out_pad:
964 sil24_cblk_free(pp, dev);
965err_out_pp:
966 kfree(pp);
967err_out:
968 return rc;
969}
970
971static void sil24_port_stop(struct ata_port *ap)
972{
973 struct device *dev = ap->host_set->dev;
974 struct sil24_port_priv *pp = ap->private_data;
975
976 sil24_cblk_free(pp, dev);
977 ata_pad_free(ap, dev);
978 kfree(pp);
979}
980
981static void sil24_host_stop(struct ata_host_set *host_set)
982{
983 struct sil24_host_priv *hpriv = host_set->private_data;
984 struct pci_dev *pdev = to_pci_dev(host_set->dev);
985
986 pci_iounmap(pdev, hpriv->host_base);
987 pci_iounmap(pdev, hpriv->port_base);
988 kfree(hpriv);
989}
990
991static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
992{
993 static int printed_version;
994 unsigned int board_id = (unsigned int)ent->driver_data;
995 struct ata_port_info *pinfo = &sil24_port_info[board_id];
996 struct ata_probe_ent *probe_ent = NULL;
997 struct sil24_host_priv *hpriv = NULL;
998 void __iomem *host_base = NULL;
999 void __iomem *port_base = NULL;
1000 int i, rc;
1001 u32 tmp;
1002
1003 if (!printed_version++)
1004 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1005
1006 rc = pci_enable_device(pdev);
1007 if (rc)
1008 return rc;
1009
1010 rc = pci_request_regions(pdev, DRV_NAME);
1011 if (rc)
1012 goto out_disable;
1013
1014 rc = -ENOMEM;
1015 /* map mmio registers */
1016 host_base = pci_iomap(pdev, 0, 0);
1017 if (!host_base)
1018 goto out_free;
1019 port_base = pci_iomap(pdev, 2, 0);
1020 if (!port_base)
1021 goto out_free;
1022
1023 /* allocate & init probe_ent and hpriv */
1024 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
1025 if (!probe_ent)
1026 goto out_free;
1027
1028 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
1029 if (!hpriv)
1030 goto out_free;
1031
1032 probe_ent->dev = pci_dev_to_dev(pdev);
1033 INIT_LIST_HEAD(&probe_ent->node);
1034
1035 probe_ent->sht = pinfo->sht;
1036 probe_ent->host_flags = pinfo->host_flags;
1037 probe_ent->pio_mask = pinfo->pio_mask;
1038 probe_ent->mwdma_mask = pinfo->mwdma_mask;
1039 probe_ent->udma_mask = pinfo->udma_mask;
1040 probe_ent->port_ops = pinfo->port_ops;
1041 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
1042
1043 probe_ent->irq = pdev->irq;
1044 probe_ent->irq_flags = SA_SHIRQ;
1045 probe_ent->mmio_base = port_base;
1046 probe_ent->private_data = hpriv;
1047
1048 hpriv->host_base = host_base;
1049 hpriv->port_base = port_base;
1050
1051 /*
1052 * Configure the device
1053 */
1054 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1055 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1056 if (rc) {
1057 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1058 if (rc) {
1059 dev_printk(KERN_ERR, &pdev->dev,
1060 "64-bit DMA enable failed\n");
1061 goto out_free;
1062 }
1063 }
1064 } else {
1065 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1066 if (rc) {
1067 dev_printk(KERN_ERR, &pdev->dev,
1068 "32-bit DMA enable failed\n");
1069 goto out_free;
1070 }
1071 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1072 if (rc) {
1073 dev_printk(KERN_ERR, &pdev->dev,
1074 "32-bit consistent DMA enable failed\n");
1075 goto out_free;
1076 }
1077 }
1078
1079 /* GPIO off */
1080 writel(0, host_base + HOST_FLASH_CMD);
1081
1082 /* Apply workaround for completion IRQ loss on PCI-X errata */
1083 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1084 tmp = readl(host_base + HOST_CTRL);
1085 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1086 dev_printk(KERN_INFO, &pdev->dev,
1087 "Applying completion IRQ loss on PCI-X "
1088 "errata fix\n");
1089 else
1090 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1091 }
1092
1093 /* clear global reset & mask interrupts during initialization */
1094 writel(0, host_base + HOST_CTRL);
1095
1096 for (i = 0; i < probe_ent->n_ports; i++) {
1097 void __iomem *port = port_base + i * PORT_REGS_SIZE;
1098 unsigned long portu = (unsigned long)port;
1099
1100 probe_ent->port[i].cmd_addr = portu;
1101 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
1102
1103 ata_std_ports(&probe_ent->port[i]);
1104
1105 /* Initial PHY setting */
1106 writel(0x20c, port + PORT_PHY_CFG);
1107
1108 /* Clear port RST */
1109 tmp = readl(port + PORT_CTRL_STAT);
1110 if (tmp & PORT_CS_PORT_RST) {
1111 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1112 tmp = ata_wait_register(port + PORT_CTRL_STAT,
1113 PORT_CS_PORT_RST,
1114 PORT_CS_PORT_RST, 10, 100);
1115 if (tmp & PORT_CS_PORT_RST)
1116 dev_printk(KERN_ERR, &pdev->dev,
1117 "failed to clear port RST\n");
1118 }
1119
1120 /* Configure IRQ WoC */
1121 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1122 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1123 else
1124 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1125
1126 /* Zero error counters. */
1127 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1128 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1129 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1130 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1131 writel(0x0000, port + PORT_CRC_ERR_CNT);
1132 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1133
1134 /* Always use 64bit activation */
1135 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1136
1137 /* Clear port multiplier enable and resume bits */
1138 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1139 }
1140
1141 /* Turn on interrupts */
1142 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
1143
1144 pci_set_master(pdev);
1145
1146 /* FIXME: check ata_device_add return value */
1147 ata_device_add(probe_ent);
1148
1149 kfree(probe_ent);
1150 return 0;
1151
1152 out_free:
1153 if (host_base)
1154 pci_iounmap(pdev, host_base);
1155 if (port_base)
1156 pci_iounmap(pdev, port_base);
1157 kfree(probe_ent);
1158 kfree(hpriv);
1159 pci_release_regions(pdev);
1160 out_disable:
1161 pci_disable_device(pdev);
1162 return rc;
1163}
1164
1165static int __init sil24_init(void)
1166{
1167 return pci_module_init(&sil24_pci_driver);
1168}
1169
1170static void __exit sil24_exit(void)
1171{
1172 pci_unregister_driver(&sil24_pci_driver);
1173}
1174
1175MODULE_AUTHOR("Tejun Heo");
1176MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1177MODULE_LICENSE("GPL");
1178MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
1179
1180module_init(sil24_init);
1181module_exit(sil24_exit);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
deleted file mode 100644
index 809d337ed641..000000000000
--- a/drivers/scsi/sata_sis.c
+++ /dev/null
@@ -1,348 +0,0 @@
1/*
2 * sata_sis.c - Silicon Integrated Systems SATA
3 *
4 * Maintained by: Uwe Koziolek
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 Uwe Koziolek
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/config.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "sata_sis"
46#define DRV_VERSION "0.6"
47
48enum {
49 sis_180 = 0,
50 SIS_SCR_PCI_BAR = 5,
51
52 /* PCI configuration registers */
53 SIS_GENCTL = 0x54, /* IDE General Control register */
54 SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
55 SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
56 SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
57 SIS_PMR = 0x90, /* port mapping register */
58 SIS_PMR_COMBINED = 0x30,
59
60 /* random bits */
61 SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */
62
63 GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
64};
65
66static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
67static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
68static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
69
70static const struct pci_device_id sis_pci_tbl[] = {
71 { PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
72 { PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
73 { PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
74 { } /* terminate list */
75};
76
77
78static struct pci_driver sis_pci_driver = {
79 .name = DRV_NAME,
80 .id_table = sis_pci_tbl,
81 .probe = sis_init_one,
82 .remove = ata_pci_remove_one,
83};
84
85static struct scsi_host_template sis_sht = {
86 .module = THIS_MODULE,
87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd,
90 .can_queue = ATA_DEF_QUEUE,
91 .this_id = ATA_SHT_THIS_ID,
92 .sg_tablesize = ATA_MAX_PRD,
93 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
94 .emulated = ATA_SHT_EMULATED,
95 .use_clustering = ATA_SHT_USE_CLUSTERING,
96 .proc_name = DRV_NAME,
97 .dma_boundary = ATA_DMA_BOUNDARY,
98 .slave_configure = ata_scsi_slave_config,
99 .slave_destroy = ata_scsi_slave_destroy,
100 .bios_param = ata_std_bios_param,
101};
102
103static const struct ata_port_operations sis_ops = {
104 .port_disable = ata_port_disable,
105 .tf_load = ata_tf_load,
106 .tf_read = ata_tf_read,
107 .check_status = ata_check_status,
108 .exec_command = ata_exec_command,
109 .dev_select = ata_std_dev_select,
110 .bmdma_setup = ata_bmdma_setup,
111 .bmdma_start = ata_bmdma_start,
112 .bmdma_stop = ata_bmdma_stop,
113 .bmdma_status = ata_bmdma_status,
114 .qc_prep = ata_qc_prep,
115 .qc_issue = ata_qc_issue_prot,
116 .data_xfer = ata_pio_data_xfer,
117 .freeze = ata_bmdma_freeze,
118 .thaw = ata_bmdma_thaw,
119 .error_handler = ata_bmdma_error_handler,
120 .post_internal_cmd = ata_bmdma_post_internal_cmd,
121 .irq_handler = ata_interrupt,
122 .irq_clear = ata_bmdma_irq_clear,
123 .scr_read = sis_scr_read,
124 .scr_write = sis_scr_write,
125 .port_start = ata_port_start,
126 .port_stop = ata_port_stop,
127 .host_stop = ata_host_stop,
128};
129
130static struct ata_port_info sis_port_info = {
131 .sht = &sis_sht,
132 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
133 .pio_mask = 0x1f,
134 .mwdma_mask = 0x7,
135 .udma_mask = 0x7f,
136 .port_ops = &sis_ops,
137};
138
139
140MODULE_AUTHOR("Uwe Koziolek");
141MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
142MODULE_LICENSE("GPL");
143MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
144MODULE_VERSION(DRV_VERSION);
145
146static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device)
147{
148 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
149
150 if (port_no) {
151 if (device == 0x182)
152 addr += SIS182_SATA1_OFS;
153 else
154 addr += SIS180_SATA1_OFS;
155 }
156
157 return addr;
158}
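/*
 * A standalone user-space sketch (not part of the original driver) of
 * the address math above, assuming libata's usual SCR indices
 * (SCR_STATUS == 0, SCR_CONTROL == 2): port 0 registers start at
 * SIS_SCR_BASE (0xc0), and port 1 sits 0x10 (SiS 180/181) or 0x20
 * (SiS 182) above that.
 *
 *	#include <assert.h>
 *
 *	static unsigned int scr_cfg_addr(unsigned int port_no,
 *					 unsigned int sc_reg, int device)
 *	{
 *		unsigned int addr = 0xc0 + 4 * sc_reg;
 *
 *		if (port_no)
 *			addr += (device == 0x182) ? 0x20 : 0x10;
 *		return addr;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(scr_cfg_addr(0, 0, 0x180) == 0xc0); // port 0 SStatus
 *		assert(scr_cfg_addr(1, 0, 0x180) == 0xd0); // port 1, 180/181
 *		assert(scr_cfg_addr(1, 2, 0x182) == 0xe8); // port 1 SControl
 *		return 0;
 *	}
 */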
159
160static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
161{
162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
164 u32 val, val2 = 0;
165 u8 pmr;
166
167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
168 return 0xffffffff;
169
170 pci_read_config_byte(pdev, SIS_PMR, &pmr);
171
172 pci_read_config_dword(pdev, cfg_addr, &val);
173
174 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
175 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
176
177 return val|val2;
178}
179
180static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
181{
182 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
183 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
184 u8 pmr;
185
186 if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */
187 return;
188
189 pci_read_config_byte(pdev, SIS_PMR, &pmr);
190
191 pci_write_config_dword(pdev, cfg_addr, val);
192
193 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
194 pci_write_config_dword(pdev, cfg_addr+0x10, val);
195}
196
197static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
198{
199 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
200 u32 val, val2 = 0;
201 u8 pmr;
202
203 if (sc_reg > SCR_CONTROL)
204 return 0xffffffffU;
205
206 if (ap->flags & SIS_FLAG_CFGSCR)
207 return sis_scr_cfg_read(ap, sc_reg);
208
209 pci_read_config_byte(pdev, SIS_PMR, &pmr);
210
211 val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
212
213 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
214 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
215
216 return val | val2;
217}
218
219static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
220{
221 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
222 u8 pmr;
223
224 if (sc_reg > SCR_CONTROL)
225 return;
226
227 pci_read_config_byte(pdev, SIS_PMR, &pmr);
228
229 if (ap->flags & SIS_FLAG_CFGSCR)
230 sis_scr_cfg_write(ap, sc_reg, val);
231 else {
232 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
233 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
234 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
235 }
236}
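/*
 * Note (sketch, not from the original source): in combined mode (PMR
 * bit set) and on the SiS 182, one libata port is backed by two phy
 * register sets 0x10 apart, so sis_scr_read() merges both copies with
 * a bitwise OR and sis_scr_write() mirrors the value to both.  A
 * minimal model of the merge, using SStatus field values from the SATA
 * spec (DET in bits 3:0, IPM in bits 11:8):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		uint32_t primary = 0x00000003;   // DET = 3: device present
 *		uint32_t secondary = 0x00000100; // IPM = 1: active state
 *
 *		assert((primary | secondary) == 0x00000103);
 *		return 0;
 *	}
 */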
237
238static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
239{
240 static int printed_version;
241 struct ata_probe_ent *probe_ent = NULL;
242 int rc;
243 u32 genctl;
244 struct ata_port_info *ppi;
245 int pci_dev_busy = 0;
246 u8 pmr;
247 u8 port2_start;
248
249 if (!printed_version++)
250 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
251
252 rc = pci_enable_device(pdev);
253 if (rc)
254 return rc;
255
256 rc = pci_request_regions(pdev, DRV_NAME);
257 if (rc) {
258 pci_dev_busy = 1;
259 goto err_out;
260 }
261
262 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
263 if (rc)
264 goto err_out_regions;
265 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
266 if (rc)
267 goto err_out_regions;
268
269 ppi = &sis_port_info;
270 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
271 if (!probe_ent) {
272 rc = -ENOMEM;
273 goto err_out_regions;
274 }
275
276 /* check and see if the SCRs are in IO space or PCI cfg space */
277 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
278 if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
279 probe_ent->host_flags |= SIS_FLAG_CFGSCR;
280
281 /* if hardware thinks SCRs are in IO space, but there are
282 * no IO resources assigned, change to PCI cfg space.
283 */
284 if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) &&
285 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
286 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
287 genctl &= ~GENCTL_IOMAPPED_SCR;
288 pci_write_config_dword(pdev, SIS_GENCTL, genctl);
289 probe_ent->host_flags |= SIS_FLAG_CFGSCR;
290 }
291
292 pci_read_config_byte(pdev, SIS_PMR, &pmr);
293 if (ent->device != 0x182) {
294 if ((pmr & SIS_PMR_COMBINED) == 0) {
295 dev_printk(KERN_INFO, &pdev->dev,
296 "Detected SiS 180/181 chipset in SATA mode\n");
297 port2_start = 64;
298 }
299 else {
300 dev_printk(KERN_INFO, &pdev->dev,
301 "Detected SiS 180/181 chipset in combined mode\n");
302			port2_start = 0;
303 }
304 }
305 else {
306 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
307 port2_start = 0x20;
308 }
309
310 if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) {
311 probe_ent->port[0].scr_addr =
312 pci_resource_start(pdev, SIS_SCR_PCI_BAR);
313 probe_ent->port[1].scr_addr =
314 pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
315 }
316
317 pci_set_master(pdev);
318 pci_intx(pdev, 1);
319
320 /* FIXME: check ata_device_add return value */
321 ata_device_add(probe_ent);
322 kfree(probe_ent);
323
324 return 0;
325
326err_out_regions:
327 pci_release_regions(pdev);
328
329err_out:
330 if (!pci_dev_busy)
331 pci_disable_device(pdev);
332 return rc;
333
334}
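/*
 * Summary of the second port's SCR placement inside the BAR 5 window,
 * as selected by port2_start above (standalone sketch, not part of the
 * original file):
 *
 *	chip     mode         port[1].scr_addr
 *	180/181  native SATA  base + 64
 *	180/181  combined     base (second phy reached via the +0x10 mirror)
 *	182      any          base + 0x20
 *
 *	#include <assert.h>
 *
 *	static unsigned long port1_scr(unsigned long base, int device,
 *				       int combined)
 *	{
 *		if (device == 0x182)
 *			return base + 0x20;
 *		return combined ? base : base + 64;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(port1_scr(0x1000, 0x180, 0) == 0x1040);
 *		assert(port1_scr(0x1000, 0x181, 1) == 0x1000);
 *		assert(port1_scr(0x1000, 0x182, 0) == 0x1020);
 *		return 0;
 *	}
 */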
335
336static int __init sis_init(void)
337{
338 return pci_module_init(&sis_pci_driver);
339}
340
341static void __exit sis_exit(void)
342{
343 pci_unregister_driver(&sis_pci_driver);
344}
345
346module_init(sis_init);
347module_exit(sis_exit);
348
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
deleted file mode 100644
index c94b870cf378..000000000000
--- a/drivers/scsi/sata_svw.c
+++ /dev/null
@@ -1,509 +0,0 @@
1/*
2 * sata_svw.c - ServerWorks / Apple K2 SATA
3 *
4 * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 *
11 * Bits from Jeff Garzik, Copyright RedHat, Inc.
12 *
13 * This driver probably works with non-Apple versions of the
14 * Broadcom chipset...
15 *
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; see the file COPYING. If not, write to
29 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
30 *
31 *
32 * libata documentation is available via 'make {ps|pdf}docs',
33 * as Documentation/DocBook/libata.*
34 *
35 * Hardware documentation available under NDA.
36 *
37 */
38
39#include <linux/config.h>
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/pci.h>
43#include <linux/init.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h>
46#include <linux/interrupt.h>
47#include <linux/device.h>
48#include <scsi/scsi_host.h>
49#include <linux/libata.h>
50
51#ifdef CONFIG_PPC_OF
52#include <asm/prom.h>
53#include <asm/pci-bridge.h>
54#endif /* CONFIG_PPC_OF */
55
56#define DRV_NAME "sata_svw"
57#define DRV_VERSION "1.8"
58
59enum {
60 /* Taskfile registers offsets */
61 K2_SATA_TF_CMD_OFFSET = 0x00,
62 K2_SATA_TF_DATA_OFFSET = 0x00,
63 K2_SATA_TF_ERROR_OFFSET = 0x04,
64 K2_SATA_TF_NSECT_OFFSET = 0x08,
65 K2_SATA_TF_LBAL_OFFSET = 0x0c,
66 K2_SATA_TF_LBAM_OFFSET = 0x10,
67 K2_SATA_TF_LBAH_OFFSET = 0x14,
68 K2_SATA_TF_DEVICE_OFFSET = 0x18,
69 K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
70 K2_SATA_TF_CTL_OFFSET = 0x20,
71
72 /* DMA base */
73 K2_SATA_DMA_CMD_OFFSET = 0x30,
74
75 /* SCRs base */
76 K2_SATA_SCR_STATUS_OFFSET = 0x40,
77 K2_SATA_SCR_ERROR_OFFSET = 0x44,
78 K2_SATA_SCR_CONTROL_OFFSET = 0x48,
79
80 /* Others */
81 K2_SATA_SICR1_OFFSET = 0x80,
82 K2_SATA_SICR2_OFFSET = 0x84,
83 K2_SATA_SIM_OFFSET = 0x88,
84
85 /* Port stride */
86 K2_SATA_PORT_OFFSET = 0x100,
87};
88
89static u8 k2_stat_check_status(struct ata_port *ap);
90
91
92static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
93{
94 if (sc_reg > SCR_CONTROL)
95 return 0xffffffffU;
96 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
97}
98
99
100static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
101 u32 val)
102{
103 if (sc_reg > SCR_CONTROL)
104 return;
105 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
106}
107
108
109static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
110{
111 struct ata_ioports *ioaddr = &ap->ioaddr;
112 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
113
114 if (tf->ctl != ap->last_ctl) {
115 writeb(tf->ctl, ioaddr->ctl_addr);
116 ap->last_ctl = tf->ctl;
117 ata_wait_idle(ap);
118 }
119 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
120 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
121 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
122 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
123 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
124 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
125 } else if (is_addr) {
126 writew(tf->feature, ioaddr->feature_addr);
127 writew(tf->nsect, ioaddr->nsect_addr);
128 writew(tf->lbal, ioaddr->lbal_addr);
129 writew(tf->lbam, ioaddr->lbam_addr);
130 writew(tf->lbah, ioaddr->lbah_addr);
131 }
132
133 if (tf->flags & ATA_TFLAG_DEVICE)
134 writeb(tf->device, ioaddr->device_addr);
135
136 ata_wait_idle(ap);
137}
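/*
 * The K2 exposes each taskfile register as a 16-bit location, so an
 * LBA48 command delivers the current and HOB ("high order byte")
 * values in a single writew(): low byte = current, high byte = HOB.
 * k2_sata_tf_read() below undoes the split.  Checked in isolation
 * (standalone sketch, not from the original source):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		uint8_t nsect = 0x34, hob_nsect = 0x12;
 *		uint16_t wire = nsect | ((uint16_t)hob_nsect << 8);
 *
 *		assert(wire == 0x1234);
 *		assert((uint8_t)wire == nsect);   // recovered tf->nsect
 *		assert((wire >> 8) == hob_nsect); // recovered tf->hob_nsect
 *		return 0;
 *	}
 */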
138
139
140static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
141{
142 struct ata_ioports *ioaddr = &ap->ioaddr;
143 u16 nsect, lbal, lbam, lbah, feature;
144
145 tf->command = k2_stat_check_status(ap);
146 tf->device = readw(ioaddr->device_addr);
147 feature = readw(ioaddr->error_addr);
148 nsect = readw(ioaddr->nsect_addr);
149 lbal = readw(ioaddr->lbal_addr);
150 lbam = readw(ioaddr->lbam_addr);
151 lbah = readw(ioaddr->lbah_addr);
152
153 tf->feature = feature;
154 tf->nsect = nsect;
155 tf->lbal = lbal;
156 tf->lbam = lbam;
157 tf->lbah = lbah;
158
159 if (tf->flags & ATA_TFLAG_LBA48) {
160 tf->hob_feature = feature >> 8;
161 tf->hob_nsect = nsect >> 8;
162 tf->hob_lbal = lbal >> 8;
163 tf->hob_lbam = lbam >> 8;
164 tf->hob_lbah = lbah >> 8;
165 }
166}
167
168/**
169 * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
170 * @qc: Info associated with this ATA transaction.
171 *
172 * LOCKING:
173 * spin_lock_irqsave(host_set lock)
174 */
175
176static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
177{
178 struct ata_port *ap = qc->ap;
179 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
180 u8 dmactl;
181 void *mmio = (void *) ap->ioaddr.bmdma_addr;
182 /* load PRD table addr. */
183 mb(); /* make sure PRD table writes are visible to controller */
184 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
185
186 /* specify data direction, triple-check start bit is clear */
187 dmactl = readb(mmio + ATA_DMA_CMD);
188 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
189 if (!rw)
190 dmactl |= ATA_DMA_WR;
191 writeb(dmactl, mmio + ATA_DMA_CMD);
192
193	/* issue r/w command if this is not an ATA DMA command */
194 if (qc->tf.protocol != ATA_PROT_DMA)
195 ap->ops->exec_command(ap, &qc->tf);
196}
197
198/**
199 * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
200 * @qc: Info associated with this ATA transaction.
201 *
202 * LOCKING:
203 * spin_lock_irqsave(host_set lock)
204 */
205
206static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
207{
208 struct ata_port *ap = qc->ap;
209 void *mmio = (void *) ap->ioaddr.bmdma_addr;
210 u8 dmactl;
211
212 /* start host DMA transaction */
213 dmactl = readb(mmio + ATA_DMA_CMD);
214 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
215	/* There is a race in certain SATA controllers that can be seen
216	   when the r/w command is given to the controller before the host
217	   DMA is started.  On a read, the controller may issue the command
218	   to the drive before it sees the DMA start; with a very fast
219	   drive, or when the request hits in the drive's cache, part or
220	   all of the requested data can come back before the DMA start is
221	   issued.  The controller is then confused as to what to do with
222	   the data.  In the worst case, when all the data is returned
223	   early, the controller can hang; in other cases it returns
224	   partial data, resulting in data corruption.  The problem has
225	   been seen on PPC systems and can also appear on systems with
226	   very fast disks where the SATA controller sits behind a number
227	   of bridges, adding significant latency between the r/w command
228	   and the start command.  Starting the DMA first avoids the race. */
229	/* issue r/w command if the access is to ATA */
230 if (qc->tf.protocol == ATA_PROT_DMA)
231 ap->ops->exec_command(ap, &qc->tf);
232}
233
234
235static u8 k2_stat_check_status(struct ata_port *ap)
236{
237 return readl((void *) ap->ioaddr.status_addr);
238}
239
240#ifdef CONFIG_PPC_OF
241/*
242 * k2_sata_proc_info
243 * inout : selects the direction of the data flow and hence the meaning
244 *         of the remaining arguments
245 * buffer: if inout == FALSE, data is written to it; otherwise read from it
246 * *start: if inout == FALSE, start of the valid data in the buffer
247 * offset: if inout == FALSE, offset from the beginning of the imaginary
248 *         file from which we start writing into the buffer
249 * length: if inout == FALSE, max number of bytes to be written into the
250 *         buffer; otherwise the number of bytes in the buffer
251 */
252static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
253 off_t offset, int count, int inout)
254{
255 struct ata_port *ap;
256 struct device_node *np;
257 int len, index;
258
259 /* Find the ata_port */
260 ap = ata_shost_to_port(shost);
261 if (ap == NULL)
262 return 0;
263
264 /* Find the OF node for the PCI device proper */
265 np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
266 if (np == NULL)
267 return 0;
268
269 /* Match it to a port node */
270 index = (ap == ap->host_set->ports[0]) ? 0 : 1;
271 for (np = np->child; np != NULL; np = np->sibling) {
272 u32 *reg = (u32 *)get_property(np, "reg", NULL);
273 if (!reg)
274 continue;
275 if (index == *reg)
276 break;
277 }
278 if (np == NULL)
279 return 0;
280
281 len = sprintf(page, "devspec: %s\n", np->full_name);
282
283 return len;
284}
285#endif /* CONFIG_PPC_OF */
286
287
288static struct scsi_host_template k2_sata_sht = {
289 .module = THIS_MODULE,
290 .name = DRV_NAME,
291 .ioctl = ata_scsi_ioctl,
292 .queuecommand = ata_scsi_queuecmd,
293 .can_queue = ATA_DEF_QUEUE,
294 .this_id = ATA_SHT_THIS_ID,
295 .sg_tablesize = LIBATA_MAX_PRD,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING,
299 .proc_name = DRV_NAME,
300 .dma_boundary = ATA_DMA_BOUNDARY,
301 .slave_configure = ata_scsi_slave_config,
302 .slave_destroy = ata_scsi_slave_destroy,
303#ifdef CONFIG_PPC_OF
304 .proc_info = k2_sata_proc_info,
305#endif
306 .bios_param = ata_std_bios_param,
307};
308
309
310static const struct ata_port_operations k2_sata_ops = {
311 .port_disable = ata_port_disable,
312 .tf_load = k2_sata_tf_load,
313 .tf_read = k2_sata_tf_read,
314 .check_status = k2_stat_check_status,
315 .exec_command = ata_exec_command,
316 .dev_select = ata_std_dev_select,
317 .bmdma_setup = k2_bmdma_setup_mmio,
318 .bmdma_start = k2_bmdma_start_mmio,
319 .bmdma_stop = ata_bmdma_stop,
320 .bmdma_status = ata_bmdma_status,
321 .qc_prep = ata_qc_prep,
322 .qc_issue = ata_qc_issue_prot,
323 .data_xfer = ata_mmio_data_xfer,
324 .freeze = ata_bmdma_freeze,
325 .thaw = ata_bmdma_thaw,
326 .error_handler = ata_bmdma_error_handler,
327 .post_internal_cmd = ata_bmdma_post_internal_cmd,
328 .irq_handler = ata_interrupt,
329 .irq_clear = ata_bmdma_irq_clear,
330 .scr_read = k2_sata_scr_read,
331 .scr_write = k2_sata_scr_write,
332 .port_start = ata_port_start,
333 .port_stop = ata_port_stop,
334 .host_stop = ata_pci_host_stop,
335};
336
337static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
338{
339 port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
340 port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
341 port->feature_addr =
342 port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
343 port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
344 port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
345 port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
346 port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
347 port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
348 port->command_addr =
349 port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
350 port->altstatus_addr =
351 port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
352 port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
353 port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
354}
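/*
 * Unlike legacy IDE, where the taskfile registers are byte-adjacent,
 * the K2 spaces them four bytes apart, so each MMIO offset is simply
 * four times the register's slot index (standalone sketch, not from
 * the original source):
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		// slots: data, error, nsect, lbal, lbam, lbah,
 *		//        device, cmd/status, ctl
 *		static const unsigned k2_ofs[] = {
 *			0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x1c, 0x20
 *		};
 *		unsigned i;
 *
 *		for (i = 0; i < 9; i++)
 *			assert(k2_ofs[i] == i * 4);
 *		return 0;
 *	}
 */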
355
356
357static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
358{
359 static int printed_version;
360 struct ata_probe_ent *probe_ent = NULL;
361 unsigned long base;
362 void __iomem *mmio_base;
363 int pci_dev_busy = 0;
364 int rc;
365 int i;
366
367 if (!printed_version++)
368 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
369
370 /*
371 * If this driver happens to only be useful on Apple's K2, then
372 * we should check that here as it has a normal Serverworks ID
373 */
374 rc = pci_enable_device(pdev);
375 if (rc)
376 return rc;
377 /*
378 * Check if we have resources mapped at all (second function may
379 * have been disabled by firmware)
380 */
381 if (pci_resource_len(pdev, 5) == 0)
382 return -ENODEV;
383
384 /* Request PCI regions */
385 rc = pci_request_regions(pdev, DRV_NAME);
386 if (rc) {
387 pci_dev_busy = 1;
388 goto err_out;
389 }
390
391 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
392 if (rc)
393 goto err_out_regions;
394 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
395 if (rc)
396 goto err_out_regions;
397
398 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
399 if (probe_ent == NULL) {
400 rc = -ENOMEM;
401 goto err_out_regions;
402 }
403
404 memset(probe_ent, 0, sizeof(*probe_ent));
405 probe_ent->dev = pci_dev_to_dev(pdev);
406 INIT_LIST_HEAD(&probe_ent->node);
407
408 mmio_base = pci_iomap(pdev, 5, 0);
409 if (mmio_base == NULL) {
410 rc = -ENOMEM;
411 goto err_out_free_ent;
412 }
413 base = (unsigned long) mmio_base;
414
415	/* Clear a magic bit in SCR1 according to Darwin; this helps some
416	 * funky Seagate drives (though so far the firmware had already
417	 * taken care of it on the machines I had access to)
418	 */
419 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
420 mmio_base + K2_SATA_SICR1_OFFSET);
421
422 /* Clear SATA error & interrupts we don't use */
423 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
424 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
425
426 probe_ent->sht = &k2_sata_sht;
427 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
428 ATA_FLAG_MMIO;
429 probe_ent->port_ops = &k2_sata_ops;
430 probe_ent->n_ports = 4;
431 probe_ent->irq = pdev->irq;
432 probe_ent->irq_flags = SA_SHIRQ;
433 probe_ent->mmio_base = mmio_base;
434
435 /* We don't care much about the PIO/UDMA masks, but the core won't like us
436 * if we don't fill these
437 */
438 probe_ent->pio_mask = 0x1f;
439 probe_ent->mwdma_mask = 0x7;
440 probe_ent->udma_mask = 0x7f;
441
442	/* Different controllers have different numbers of ports - currently
443	 * 4 or 8.  All ports are on the same PCI function; the multi-function
444	 * variant is no longer available and should not be seen in any system. */
445 for (i = 0; i < ent->driver_data; i++)
446 k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
447
448 pci_set_master(pdev);
449
450 /* FIXME: check ata_device_add return value */
451 ata_device_add(probe_ent);
452 kfree(probe_ent);
453
454 return 0;
455
456err_out_free_ent:
457 kfree(probe_ent);
458err_out_regions:
459 pci_release_regions(pdev);
460err_out:
461 if (!pci_dev_busy)
462 pci_disable_device(pdev);
463 return rc;
464}
465
466/* 0x240 is the device ID for the Apple K2 device
467 * 0x241 is the device ID for the Serverworks Frodo4
468 * 0x242 is the device ID for the Serverworks Frodo8
469 * 0x24a is the device ID for the BCM5785 (aka HT1000) HT southbridge
470 *       integrated SATA controller
471 */
472static const struct pci_device_id k2_sata_pci_tbl[] = {
473 { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
474 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
475 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
476 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
477 { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
478 { }
479};
480
481
482static struct pci_driver k2_sata_pci_driver = {
483 .name = DRV_NAME,
484 .id_table = k2_sata_pci_tbl,
485 .probe = k2_sata_init_one,
486 .remove = ata_pci_remove_one,
487};
488
489
490static int __init k2_sata_init(void)
491{
492 return pci_module_init(&k2_sata_pci_driver);
493}
494
495
496static void __exit k2_sata_exit(void)
497{
498 pci_unregister_driver(&k2_sata_pci_driver);
499}
500
501
502MODULE_AUTHOR("Benjamin Herrenschmidt");
503MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
504MODULE_LICENSE("GPL");
505MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
506MODULE_VERSION(DRV_VERSION);
507
508module_init(k2_sata_init);
509module_exit(k2_sata_exit);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
deleted file mode 100644
index 7f864410f7c2..000000000000
--- a/drivers/scsi/sata_sx4.c
+++ /dev/null
@@ -1,1502 +0,0 @@
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.9"
50
51
52enum {
53 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
54
55 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
56 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
57 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
58 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
59
60 PDC_20621_SEQCTL = 0x400,
61 PDC_20621_SEQMASK = 0x480,
62 PDC_20621_GENERAL_CTL = 0x484,
63 PDC_20621_PAGE_SIZE = (32 * 1024),
64
65 /* chosen, not constant, values; we design our own DIMM mem map */
66 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
67 PDC_20621_DIMM_BASE = 0x00200000,
68 PDC_20621_DIMM_DATA = (64 * 1024),
69 PDC_DIMM_DATA_STEP = (256 * 1024),
70 PDC_DIMM_WINDOW_STEP = (8 * 1024),
71 PDC_DIMM_HOST_PRD = (6 * 1024),
72 PDC_DIMM_HOST_PKT = (128 * 0),
73 PDC_DIMM_HPKT_PRD = (128 * 1),
74 PDC_DIMM_ATA_PKT = (128 * 2),
75 PDC_DIMM_APKT_PRD = (128 * 3),
76 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
77 PDC_PAGE_WINDOW = 0x40,
78 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
79 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
80 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
81
82 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
83
84 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
85 (1<<23),
86
87 board_20621 = 0, /* FastTrak S150 SX4 */
88
89 PDC_RESET = (1 << 11), /* HDMA reset */
90
91 PDC_MAX_HDMA = 32,
92 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
93
94 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
95 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
96 PDC_MAX_DIMM_MODULE = 0x02,
97 PDC_I2C_CONTROL_OFFSET = 0x48,
98 PDC_I2C_ADDR_DATA_OFFSET = 0x4C,
99 PDC_DIMM0_CONTROL_OFFSET = 0x80,
100 PDC_DIMM1_CONTROL_OFFSET = 0x84,
101 PDC_SDRAM_CONTROL_OFFSET = 0x88,
102 PDC_I2C_WRITE = 0x00000000,
103 PDC_I2C_READ = 0x00000040,
104 PDC_I2C_START = 0x00000080,
105 PDC_I2C_MASK_INT = 0x00000020,
106 PDC_I2C_COMPLETE = 0x00010000,
107 PDC_I2C_NO_ACK = 0x00100000,
108 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
109 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
110 PDC_DIMM_SPD_ROW_NUM = 3,
111 PDC_DIMM_SPD_COLUMN_NUM = 4,
112 PDC_DIMM_SPD_MODULE_ROW = 5,
113 PDC_DIMM_SPD_TYPE = 11,
114 PDC_DIMM_SPD_FRESH_RATE = 12,
115 PDC_DIMM_SPD_BANK_NUM = 17,
116 PDC_DIMM_SPD_CAS_LATENCY = 18,
117 PDC_DIMM_SPD_ATTRIBUTE = 21,
118 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
119 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
120 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
121 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
122 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
123 PDC_CTL_STATUS = 0x08,
124 PDC_DIMM_WINDOW_CTLR = 0x0C,
125 PDC_TIME_CONTROL = 0x3C,
126 PDC_TIME_PERIOD = 0x40,
127 PDC_TIME_COUNTER = 0x44,
128 PDC_GENERAL_CTLR = 0x484,
129 PCI_PLL_INIT = 0x8A531824,
130 PCI_X_TCOUNT = 0xEE1E5CFF
131};
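/*
 * The driver-chosen DIMM map above gives each port a 512-byte packet
 * header (four 128-byte slots) in an 8K window, with bulk data carved
 * from 256K steps starting 64K into the DIMM.  The derived constants
 * check out directly (standalone sketch, not part of the original
 * file):
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		const unsigned page = 32 * 1024;      // PDC_20621_PAGE_SIZE
 *
 *		assert(128 * 3 + 128 == 512);         // PDC_DIMM_HEADER_SZ
 *		assert(0x40 + (64 * 1024) / page == 0x42); // PDC_PAGE_DATA
 *		assert((256 * 1024) / page == 8);     // PDC_PAGE_SET
 *		return 0;
 *	}
 */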
132
133
134struct pdc_port_priv {
135 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
136 u8 *pkt;
137 dma_addr_t pkt_dma;
138};
139
140struct pdc_host_priv {
141 void __iomem *dimm_mmio;
142
143 unsigned int doing_hdma;
144 unsigned int hdma_prod;
145 unsigned int hdma_cons;
146 struct {
147 struct ata_queued_cmd *qc;
148 unsigned int seq;
149 unsigned long pkt_ofs;
150 } hdma[32];
151};
152
153
154static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
155static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
156static void pdc_eng_timeout(struct ata_port *ap);
157static void pdc_20621_phy_reset (struct ata_port *ap);
158static int pdc_port_start(struct ata_port *ap);
159static void pdc_port_stop(struct ata_port *ap);
160static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
163static void pdc20621_host_stop(struct ata_host_set *host_set);
164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
165static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
166static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
167 u32 device, u32 subaddr, u32 *pdata);
168static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
169static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
170#ifdef ATA_VERBOSE_DEBUG
171static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
172 void *psource, u32 offset, u32 size);
173#endif
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap);
177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178
179
180static struct scsi_host_template pdc_sata_sht = {
181 .module = THIS_MODULE,
182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd,
185 .can_queue = ATA_DEF_QUEUE,
186 .this_id = ATA_SHT_THIS_ID,
187 .sg_tablesize = LIBATA_MAX_PRD,
188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
189 .emulated = ATA_SHT_EMULATED,
190 .use_clustering = ATA_SHT_USE_CLUSTERING,
191 .proc_name = DRV_NAME,
192 .dma_boundary = ATA_DMA_BOUNDARY,
193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy,
195 .bios_param = ata_std_bios_param,
196};
197
198static const struct ata_port_operations pdc_20621_ops = {
199 .port_disable = ata_port_disable,
200 .tf_load = pdc_tf_load_mmio,
201 .tf_read = ata_tf_read,
202 .check_status = ata_check_status,
203 .exec_command = pdc_exec_command_mmio,
204 .dev_select = ata_std_dev_select,
205 .phy_reset = pdc_20621_phy_reset,
206 .qc_prep = pdc20621_qc_prep,
207 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer,
209 .eng_timeout = pdc_eng_timeout,
210 .irq_handler = pdc20621_interrupt,
211 .irq_clear = pdc20621_irq_clear,
212 .port_start = pdc_port_start,
213 .port_stop = pdc_port_stop,
214 .host_stop = pdc20621_host_stop,
215};
216
217static const struct ata_port_info pdc_port_info[] = {
218 /* board_20621 */
219 {
220 .sht = &pdc_sata_sht,
221 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
222 ATA_FLAG_SRST | ATA_FLAG_MMIO |
223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
224 .pio_mask = 0x1f, /* pio0-4 */
225 .mwdma_mask = 0x07, /* mwdma0-2 */
226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
227 .port_ops = &pdc_20621_ops,
228 },
229
230};
231
232static const struct pci_device_id pdc_sata_pci_tbl[] = {
233 { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
234 board_20621 },
235 { } /* terminate list */
236};
237
238
239static struct pci_driver pdc_sata_pci_driver = {
240 .name = DRV_NAME,
241 .id_table = pdc_sata_pci_tbl,
242 .probe = pdc_sata_init_one,
243 .remove = ata_pci_remove_one,
244};
245
246
247static void pdc20621_host_stop(struct ata_host_set *host_set)
248{
249 struct pci_dev *pdev = to_pci_dev(host_set->dev);
250 struct pdc_host_priv *hpriv = host_set->private_data;
251 void __iomem *dimm_mmio = hpriv->dimm_mmio;
252
253 pci_iounmap(pdev, dimm_mmio);
254 kfree(hpriv);
255
256 pci_iounmap(pdev, host_set->mmio_base);
257}
258
259static int pdc_port_start(struct ata_port *ap)
260{
261 struct device *dev = ap->host_set->dev;
262 struct pdc_port_priv *pp;
263 int rc;
264
265 rc = ata_port_start(ap);
266 if (rc)
267 return rc;
268
269 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
270 if (!pp) {
271 rc = -ENOMEM;
272 goto err_out;
273 }
274 memset(pp, 0, sizeof(*pp));
275
276 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
277 if (!pp->pkt) {
278 rc = -ENOMEM;
279 goto err_out_kfree;
280 }
281
282 ap->private_data = pp;
283
284 return 0;
285
286err_out_kfree:
287 kfree(pp);
288err_out:
289 ata_port_stop(ap);
290 return rc;
291}
292
293
294static void pdc_port_stop(struct ata_port *ap)
295{
296 struct device *dev = ap->host_set->dev;
297 struct pdc_port_priv *pp = ap->private_data;
298
299 ap->private_data = NULL;
300 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
301 kfree(pp);
302 ata_port_stop(ap);
303}
304
305
306static void pdc_20621_phy_reset (struct ata_port *ap)
307{
308 VPRINTK("ENTER\n");
309 ap->cbl = ATA_CBL_SATA;
310 ata_port_probe(ap);
311 ata_bus_reset(ap);
312}
313
314static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
315 unsigned int portno,
316 unsigned int total_len)
317{
318 u32 addr;
319 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
320 u32 *buf32 = (u32 *) buf;
321
322 /* output ATA packet S/G table */
323 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
324 (PDC_DIMM_DATA_STEP * portno);
325 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
326 buf32[dw] = cpu_to_le32(addr);
327 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
328
329 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
330 PDC_20621_DIMM_BASE +
331 (PDC_DIMM_WINDOW_STEP * portno) +
332 PDC_DIMM_APKT_PRD,
333 buf32[dw], buf32[dw + 1]);
334}
335
336static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
337 unsigned int portno,
338 unsigned int total_len)
339{
340 u32 addr;
341 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
342 u32 *buf32 = (u32 *) buf;
343
344 /* output Host DMA packet S/G table */
345 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
346 (PDC_DIMM_DATA_STEP * portno);
347
348 buf32[dw] = cpu_to_le32(addr);
349 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
350
351 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
352 PDC_20621_DIMM_BASE +
353 (PDC_DIMM_WINDOW_STEP * portno) +
354 PDC_DIMM_HPKT_PRD,
355 buf32[dw], buf32[dw + 1]);
356}
357
358static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
359 unsigned int devno, u8 *buf,
360 unsigned int portno)
361{
362 unsigned int i, dw;
363 u32 *buf32 = (u32 *) buf;
364 u8 dev_reg;
365
366 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
367 (PDC_DIMM_WINDOW_STEP * portno) +
368 PDC_DIMM_APKT_PRD;
369 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
370
371 i = PDC_DIMM_ATA_PKT;
372
373 /*
374 * Set up ATA packet
375 */
376 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
377 buf[i++] = PDC_PKT_READ;
378 else if (tf->protocol == ATA_PROT_NODATA)
379 buf[i++] = PDC_PKT_NODATA;
380 else
381 buf[i++] = 0;
382 buf[i++] = 0; /* reserved */
383 buf[i++] = portno + 1; /* seq. id */
384 buf[i++] = 0xff; /* delay seq. id */
385
386 /* dimm dma S/G, and next-pkt */
387 dw = i >> 2;
388 if (tf->protocol == ATA_PROT_NODATA)
389 buf32[dw] = 0;
390 else
391 buf32[dw] = cpu_to_le32(dimm_sg);
392 buf32[dw + 1] = 0;
393 i += 8;
394
395 if (devno == 0)
396 dev_reg = ATA_DEVICE_OBS;
397 else
398 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
399
400 /* select device */
401 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
402 buf[i++] = dev_reg;
403
404 /* device control register */
405 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
406 buf[i++] = tf->ctl;
407
408 return i;
409}
410
411static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
412 unsigned int portno)
413{
414 unsigned int dw;
415 u32 tmp, *buf32 = (u32 *) buf;
416
417 unsigned int host_sg = PDC_20621_DIMM_BASE +
418 (PDC_DIMM_WINDOW_STEP * portno) +
419 PDC_DIMM_HOST_PRD;
420 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
421 (PDC_DIMM_WINDOW_STEP * portno) +
422 PDC_DIMM_HPKT_PRD;
423 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
424 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
425
426 dw = PDC_DIMM_HOST_PKT >> 2;
427
428 /*
429 * Set up Host DMA packet
430 */
431 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
432 tmp = PDC_PKT_READ;
433 else
434 tmp = 0;
435 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
436 tmp |= (0xff << 24); /* delay seq. id */
437 buf32[dw + 0] = cpu_to_le32(tmp);
438 buf32[dw + 1] = cpu_to_le32(host_sg);
439 buf32[dw + 2] = cpu_to_le32(dimm_sg);
440 buf32[dw + 3] = 0;
441
442 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
443 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
444 PDC_DIMM_HOST_PKT,
445 buf32[dw + 0],
446 buf32[dw + 1],
447 buf32[dw + 2],
448 buf32[dw + 3]);
449}
450
451static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
452{
453 struct scatterlist *sg;
454 struct ata_port *ap = qc->ap;
455 struct pdc_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host_set->mmio_base;
457 struct pdc_host_priv *hpriv = ap->host_set->private_data;
458 void __iomem *dimm_mmio = hpriv->dimm_mmio;
459 unsigned int portno = ap->port_no;
460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462
463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464
465 VPRINTK("ata%u: ENTER\n", ap->id);
466
467 /* hard-code chip #0 */
468 mmio += PDC_CHIP0_OFS;
469
470 /*
471 * Build S/G table
472 */
473 idx = 0;
474 ata_for_each_sg(sg, qc) {
475 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
476 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
477 total_len += sg_dma_len(sg);
478 }
479 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
480 sgt_len = idx * 4;
481
482 /*
483 * Build ATA, host DMA packets
484 */
485 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
486 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
487
488 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
489 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
490
491 if (qc->tf.flags & ATA_TFLAG_LBA48)
492 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
493 else
494 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
495
496 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
497
498 /* copy three S/G tables and two packets to DIMM MMIO window */
499 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
500 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
501 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
502 PDC_DIMM_HOST_PRD,
503 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
504
505 /* force host FIFO dump */
506 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
507
508 readl(dimm_mmio); /* MMIO PCI posting flush */
509
510 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
511}
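/*
 * The S/G table built above is a flat array of (address, length) dword
 * pairs with an end-of-table bit OR-ed into the final length (assumed
 * here to be bit 31, libata's ATA_PRD_EOT); cpu_to_le32() is omitted
 * in this standalone sketch, which is not part of the original file:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define PRD_EOT 0x80000000u
 *
 *	int main(void)
 *	{
 *		const uint32_t seg_addr[] = { 0x100000, 0x200000 };
 *		const uint32_t seg_len[]  = { 0x1000, 0x800 };
 *		uint32_t buf[4];
 *		unsigned i, idx = 0, total = 0;
 *
 *		for (i = 0; i < 2; i++) {
 *			buf[idx++] = seg_addr[i];
 *			buf[idx++] = seg_len[i];
 *			total += seg_len[i];
 *		}
 *		buf[idx - 1] |= PRD_EOT;  // mark the last entry
 *
 *		assert(total == 0x1800);
 *		assert(buf[3] == (PRD_EOT | 0x800));
 *		return 0;
 *	}
 */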
512
513static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
514{
515 struct ata_port *ap = qc->ap;
516 struct pdc_port_priv *pp = ap->private_data;
517 void __iomem *mmio = ap->host_set->mmio_base;
518 struct pdc_host_priv *hpriv = ap->host_set->private_data;
519 void __iomem *dimm_mmio = hpriv->dimm_mmio;
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
537 /* copy three S/G tables and two packets to DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547}
548
549static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550{
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561}
562
563static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
564 unsigned int seq,
565 u32 pkt_ofs)
566{
567 struct ata_port *ap = qc->ap;
568 struct ata_host_set *host_set = ap->host_set;
569 void __iomem *mmio = host_set->mmio_base;
570
571 /* hard-code chip #0 */
572 mmio += PDC_CHIP0_OFS;
573
574 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
575 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
576
577 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
578 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
579}
580
581static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584{
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host_set->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599}
600
601static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602{
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host_set->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616}
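/*
 * The HDMA queue above is a 32-entry power-of-two ring: the producer
 * and consumer counters grow without bound and are masked on use,
 * while doing_hdma marks the packet currently in flight (submitted
 * straight to hardware, never queued).  A user-space model of the
 * indexing (standalone sketch, not from the original file):
 *
 *	#include <assert.h>
 *
 *	#define Q_MASK (32 - 1)  // PDC_HDMA_Q_MASK
 *
 *	int main(void)
 *	{
 *		unsigned prod = 0, cons = 0, i;
 *
 *		for (i = 0; i < 100; i++)  // queue 100 packets
 *			assert((prod++ & Q_MASK) == i % 32);
 *		for (i = 0; i < 100; i++)  // drain them in FIFO order
 *			assert((cons++ & Q_MASK) == i % 32);
 *		assert(prod == cons);      // ring empty again
 *		return 0;
 *	}
 */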
617
618#ifdef ATA_VERBOSE_DEBUG
619static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
620{
621 struct ata_port *ap = qc->ap;
622 unsigned int port_no = ap->port_no;
623 struct pdc_host_priv *hpriv = ap->host_set->private_data;
624 void *dimm_mmio = hpriv->dimm_mmio;
625
626 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
627 dimm_mmio += PDC_DIMM_HOST_PKT;
628
629 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
630 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
631 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
632 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
633}
634#else
635static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
636#endif /* ATA_VERBOSE_DEBUG */
637
638static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639{
640 struct ata_port *ap = qc->ap;
641 struct ata_host_set *host_set = ap->host_set;
642 unsigned int port_no = ap->port_no;
643 void __iomem *mmio = host_set->mmio_base;
644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
645 u8 seq = (u8) (port_no + 1);
646 unsigned int port_ofs;
647
648 /* hard-code chip #0 */
649 mmio += PDC_CHIP0_OFS;
650
651 VPRINTK("ata%u: ENTER\n", ap->id);
652
653 wmb(); /* flush PRD, pkt writes */
654
655 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
656
657 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
658 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
659 seq += 4;
660
661 pdc20621_dump_hdma(qc);
662 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
663 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
664 port_ofs + PDC_DIMM_HOST_PKT,
665 port_ofs + PDC_DIMM_HOST_PKT,
666 seq);
667 } else {
668 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670
671 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT,
677 seq);
678 }
679}
680
681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{
683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA:
685 case ATA_PROT_NODATA:
686 pdc20621_packet_start(qc);
687 return 0;
688
689 case ATA_PROT_ATAPI_DMA:
690 BUG();
691 break;
692
693 default:
694 break;
695 }
696
697 return ata_qc_issue_prot(qc);
698}
699
700static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
701 struct ata_queued_cmd *qc,
702 unsigned int doing_hdma,
703 void __iomem *mmio)
704{
705 unsigned int port_no = ap->port_no;
706 unsigned int port_ofs =
707 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
708 u8 status;
709 unsigned int handled = 0;
710
711 VPRINTK("ENTER\n");
712
713 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
714 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
715
716 /* step two - DMA from DIMM to host */
717 if (doing_hdma) {
718 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
719 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* get drive status; clear intr; complete txn */
721 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
722 ata_qc_complete(qc);
723 pdc20621_pop_hdma(qc);
724 }
725
726 /* step one - exec ATA command */
727 else {
728 u8 seq = (u8) (port_no + 1 + 4);
729 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
730 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
731
732 /* submit hdma pkt */
733 pdc20621_dump_hdma(qc);
734 pdc20621_push_hdma(qc, seq,
735 port_ofs + PDC_DIMM_HOST_PKT);
736 }
737 handled = 1;
738
739 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
740
741 /* step one - DMA from host to DIMM */
742 if (doing_hdma) {
743 u8 seq = (u8) (port_no + 1);
744 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
745 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
746
747 /* submit ata pkt */
748 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
749 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
750 writel(port_ofs + PDC_DIMM_ATA_PKT,
751 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
753 }
754
755 /* step two - execute ATA command */
756 else {
757 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
758 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
759 /* get drive status; clear intr; complete txn */
760 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
761 ata_qc_complete(qc);
762 pdc20621_pop_hdma(qc);
763 }
764 handled = 1;
765
766 /* command completion, but no data xfer */
767 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
768
769 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
770 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
771 qc->err_mask |= ac_err_mask(status);
772 ata_qc_complete(qc);
773 handled = 1;
774
775 } else {
776 ap->stats.idle_irq++;
777 }
778
779 return handled;
780}
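/*
 * Summary (added note, derived from the code above): each DMA transfer
 * is a two-step handshake split across interrupts, and the SEQ id
 * encodes both the port and the step (ATA packets use seq 1-4, host
 * DMA packets seq 5-8):
 *
 *	READ:   step 1  ATA pkt   drive -> DIMM  (seq = port_no + 1)
 *	        step 2  HDMA pkt  DIMM -> host   (seq = port_no + 5)
 *	WRITE:  step 1  HDMA pkt  host -> DIMM   (seq = port_no + 5)
 *	        step 2  ATA pkt   DIMM -> drive  (seq = port_no + 1)
 *
 * The completion interrupt of step 1 submits step 2; the completion of
 * step 2 harvests drive status and completes the qc.
 */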
781
782static void pdc20621_irq_clear(struct ata_port *ap)
783{
784 struct ata_host_set *host_set = ap->host_set;
785 void __iomem *mmio = host_set->mmio_base;
786
787 mmio += PDC_CHIP0_OFS;
788
789 readl(mmio + PDC_20621_SEQMASK);
790}
791
792static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
793{
794 struct ata_host_set *host_set = dev_instance;
795 struct ata_port *ap;
796 u32 mask = 0;
797 unsigned int i, tmp, port_no;
798 unsigned int handled = 0;
799 void __iomem *mmio_base;
800
801 VPRINTK("ENTER\n");
802
803 if (!host_set || !host_set->mmio_base) {
804 VPRINTK("QUICK EXIT\n");
805 return IRQ_NONE;
806 }
807
808 mmio_base = host_set->mmio_base;
809
810 /* reading should also clear interrupts */
811 mmio_base += PDC_CHIP0_OFS;
812 mask = readl(mmio_base + PDC_20621_SEQMASK);
813 VPRINTK("mask == 0x%x\n", mask);
814
815 if (mask == 0xffffffff) {
816 VPRINTK("QUICK EXIT 2\n");
817 return IRQ_NONE;
818 }
819 mask &= 0xffff; /* only 16 tags possible */
820 if (!mask) {
821 VPRINTK("QUICK EXIT 3\n");
822 return IRQ_NONE;
823 }
824
825 spin_lock(&host_set->lock);
826
827 for (i = 1; i < 9; i++) {
828 port_no = i - 1;
829 if (port_no > 3)
830 port_no -= 4;
831 if (port_no >= host_set->n_ports)
832 ap = NULL;
833 else
834 ap = host_set->ports[port_no];
835 tmp = mask & (1 << i);
836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
837 if (tmp && ap &&
838 !(ap->flags & ATA_FLAG_DISABLED)) {
839 struct ata_queued_cmd *qc;
840
841 qc = ata_qc_from_tag(ap, ap->active_tag);
842 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
843 handled += pdc20621_host_intr(ap, qc, (i > 4),
844 mmio_base);
845 }
846 }
847
848 spin_unlock(&host_set->lock);
849
850 VPRINTK("mask == 0x%x\n", mask);
851
852 VPRINTK("EXIT\n");
853
854 return IRQ_RETVAL(handled);
855}
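/*
 * The loop above walks SEQ interrupt bits 1-8 and maps each back to a
 * (port, phase) pair: bits 1-4 are the ATA-packet sequence ids of
 * ports 0-3, bits 5-8 the host-DMA ids of the same ports.  The mapping
 * in isolation (standalone sketch, not from the original file):
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned i;
 *
 *		for (i = 1; i < 9; i++) {
 *			unsigned port_no = i - 1;
 *			int hdma = (i > 4);
 *
 *			if (port_no > 3)
 *				port_no -= 4;
 *			// seq 1..4 -> ports 0..3, ATA phase;
 *			// seq 5..8 -> ports 0..3, HDMA phase
 *			assert(port_no == (hdma ? i - 5 : i - 1));
 *		}
 *		return 0;
 *	}
 */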
856
857static void pdc_eng_timeout(struct ata_port *ap)
858{
859 u8 drv_stat;
860 struct ata_host_set *host_set = ap->host_set;
861 struct ata_queued_cmd *qc;
862 unsigned long flags;
863
864 DPRINTK("ENTER\n");
865
866 spin_lock_irqsave(&host_set->lock, flags);
867
868 qc = ata_qc_from_tag(ap, ap->active_tag);
869
870 switch (qc->tf.protocol) {
871 case ATA_PROT_DMA:
872 case ATA_PROT_NODATA:
873 ata_port_printk(ap, KERN_ERR, "command timeout\n");
874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
875 break;
876
877 default:
878 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
879
880 ata_port_printk(ap, KERN_ERR,
881 "unknown timeout, cmd 0x%x stat 0x%x\n",
882 qc->tf.command, drv_stat);
883
884 qc->err_mask |= ac_err_mask(drv_stat);
885 break;
886 }
887
888 spin_unlock_irqrestore(&host_set->lock, flags);
889 ata_eh_qc_complete(qc);
890 DPRINTK("EXIT\n");
891}
892
893static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
894{
895 WARN_ON (tf->protocol == ATA_PROT_DMA ||
896 tf->protocol == ATA_PROT_NODATA);
897 ata_tf_load(ap, tf);
898}
899
900
901static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
902{
903 WARN_ON (tf->protocol == ATA_PROT_DMA ||
904 tf->protocol == ATA_PROT_NODATA);
905 ata_exec_command(ap, tf);
906}
907
908
909static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
910{
911 port->cmd_addr = base;
912 port->data_addr = base;
913 port->feature_addr =
914 port->error_addr = base + 0x4;
915 port->nsect_addr = base + 0x8;
916 port->lbal_addr = base + 0xc;
917 port->lbam_addr = base + 0x10;
918 port->lbah_addr = base + 0x14;
919 port->device_addr = base + 0x18;
920 port->command_addr =
921 port->status_addr = base + 0x1c;
922 port->altstatus_addr =
923 port->ctl_addr = base + 0x38;
924}
925
926
927#ifdef ATA_VERBOSE_DEBUG
928static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
929 u32 offset, u32 size)
930{
931 u32 window_size;
932 u16 idx;
933 u8 page_mask;
934 long dist;
935 void __iomem *mmio = pe->mmio_base;
936 struct pdc_host_priv *hpriv = pe->private_data;
937 void __iomem *dimm_mmio = hpriv->dimm_mmio;
938
939 /* hard-code chip #0 */
940 mmio += PDC_CHIP0_OFS;
941
942 page_mask = 0x00;
943 window_size = 0x2000 * 4; /* 32K byte uchar size */
944 idx = (u16) (offset / window_size);
945
946 writel(0x01, mmio + PDC_GENERAL_CTLR);
947 readl(mmio + PDC_GENERAL_CTLR);
948 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
949 readl(mmio + PDC_DIMM_WINDOW_CTLR);
950
951 offset -= (idx * window_size);
952 idx++;
953 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
954 (long) (window_size - offset);
955 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
956 dist);
957
958 psource += dist;
959 size -= dist;
960	while ((long) size >= (long) window_size) {
961 writel(0x01, mmio + PDC_GENERAL_CTLR);
962 readl(mmio + PDC_GENERAL_CTLR);
963 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
964 readl(mmio + PDC_DIMM_WINDOW_CTLR);
965 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
966 window_size / 4);
967 psource += window_size;
968 size -= window_size;
969		idx++;
970 }
971
972 if (size) {
973 writel(0x01, mmio + PDC_GENERAL_CTLR);
974 readl(mmio + PDC_GENERAL_CTLR);
975 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
976 readl(mmio + PDC_DIMM_WINDOW_CTLR);
977 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
978 size / 4);
979 }
980}
981#endif
982
983
984static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
985 u32 offset, u32 size)
986{
987 u32 window_size;
988 u16 idx;
989 u8 page_mask;
990 long dist;
991 void __iomem *mmio = pe->mmio_base;
992 struct pdc_host_priv *hpriv = pe->private_data;
993 void __iomem *dimm_mmio = hpriv->dimm_mmio;
994
995 /* hard-code chip #0 */
996 mmio += PDC_CHIP0_OFS;
997
998 page_mask = 0x00;
999 window_size = 0x2000 * 4; /* 32K byte uchar size */
1000 idx = (u16) (offset / window_size);
1001
1002 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1004 offset -= (idx * window_size);
1005 idx++;
1006 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1007 (long) (window_size - offset);
1008 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1009 writel(0x01, mmio + PDC_GENERAL_CTLR);
1010 readl(mmio + PDC_GENERAL_CTLR);
1011
1012 psource += dist;
1013 size -= dist;
1014	while ((long) size >= (long) window_size) {
1015 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017 memcpy_toio(dimm_mmio, psource, window_size / 4);
1018 writel(0x01, mmio + PDC_GENERAL_CTLR);
1019 readl(mmio + PDC_GENERAL_CTLR);
1020 psource += window_size;
1021 size -= window_size;
1022		idx++;
1023 }
1024
1025 if (size) {
1026 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1027 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1028 memcpy_toio(dimm_mmio, psource, size / 4);
1029 writel(0x01, mmio + PDC_GENERAL_CTLR);
1030 readl(mmio + PDC_GENERAL_CTLR);
1031 }
1032}
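/*
 * Both DIMM copy helpers stream through a single 32K window: the first
 * chunk is bounded by the distance to the window's end, then whole
 * windows follow, then the tail.  The chunking math in isolation
 * (standalone sketch, not from the original file):
 *
 *	#include <assert.h>
 *
 *	#define WIN (0x2000 * 4)  // 32K window, as above
 *
 *	int main(void)
 *	{
 *		unsigned offset = WIN - 100;  // 100 bytes short of a boundary
 *		unsigned size = WIN + 300;
 *		unsigned idx = offset / WIN;  // starting window index
 *		unsigned first;
 *
 *		offset -= idx * WIN;          // offset within that window
 *		first = (offset + size <= WIN) ? size : WIN - offset;
 *
 *		assert(first == 100);                 // up to the boundary
 *		assert((size - first) / WIN == 1);    // one full window
 *		assert((size - first) % WIN == 200);  // 200-byte tail
 *		return 0;
 *	}
 */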
1033
1034
1035static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1036 u32 subaddr, u32 *pdata)
1037{
1038 void __iomem *mmio = pe->mmio_base;
1039 u32 i2creg = 0;
1040 u32 status;
1041	u32 count = 0;
1042
1043 /* hard-code chip #0 */
1044 mmio += PDC_CHIP0_OFS;
1045
1046 i2creg |= device << 24;
1047 i2creg |= subaddr << 16;
1048
1049 /* Set the device and subaddress */
1050 writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET);
1051 readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1052
1053 /* Write Control to perform read operation, mask int */
1054 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1055 mmio + PDC_I2C_CONTROL_OFFSET);
1056
1057 for (count = 0; count <= 1000; count ++) {
1058 status = readl(mmio + PDC_I2C_CONTROL_OFFSET);
1059 if (status & PDC_I2C_COMPLETE) {
1060 status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1061 break;
1062 } else if (count == 1000)
1063 return 0;
1064 }
1065
1066 *pdata = (status >> 8) & 0x000000ff;
1067 return 1;
1068}
1069
1070
1071static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
1072{
1073	u32 data = 0;
1074 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1075 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1076 if (data == 100)
1077 return 100;
1078 } else
1079 return 0;
1080
1081 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1082		if (data <= 0x75)
1083 return 133;
1084 } else
1085 return 0;
1086
1087 return 0;
1088}
1089
1090
1091static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1092{
1093 u32 spd0[50];
1094 u32 data = 0;
1095 int size, i;
1096 u8 bdimmsize;
1097 void __iomem *mmio = pe->mmio_base;
1098 static const struct {
1099 unsigned int reg;
1100 unsigned int ofs;
1101 } pdc_i2c_read_data [] = {
1102 { PDC_DIMM_SPD_TYPE, 11 },
1103 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1104 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1105 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1106 { PDC_DIMM_SPD_ROW_NUM, 3 },
1107 { PDC_DIMM_SPD_BANK_NUM, 17 },
1108 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1109 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1110 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1111 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1112 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1113 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1114 };
1115
1116 /* hard-code chip #0 */
1117 mmio += PDC_CHIP0_OFS;
1118
1119	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1120 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1121 pdc_i2c_read_data[i].reg,
1122 &spd0[pdc_i2c_read_data[i].ofs]);
1123
1124 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1125 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1126 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1127 data |= (((((spd0[29] > spd0[28])
1128 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1129 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1130
1131 if (spd0[18] & 0x08)
1132 data |= ((0x03) << 14);
1133 else if (spd0[18] & 0x04)
1134 data |= ((0x02) << 14);
1135 else if (spd0[18] & 0x01)
1136 data |= ((0x01) << 14);
1137 else
1138 data |= (0 << 14);
1139
1140 /*
1141	   Calculate bDIMMSize (log2 of the module size) and merge the
1142	   DIMM size into the programmed start/end address field.
1143 */
1144
1145 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1146 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1147 data |= (((size / 16) - 1) << 16);
1148 data |= (0 << 23);
1149 data |= 8;
1150 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
1151 readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
1152 return size;
1153}
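/*
 * The size computation above sums log2 contributions from the SPD
 * bytes: columns + module-rows/2 + rows + banks/2 + 3 (the +3
 * presumably accounting for the 8-byte data width).  With hypothetical
 * but typical SDRAM values - 12 row bits, 10 column bits, 2 module
 * rows, 4 banks - that yields a 256 MB module (standalone sketch, not
 * from the original file):
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned rows = 12, cols = 10, mrows = 2, banks = 4;
 *		unsigned bdimmsize = cols + mrows / 2 + rows + banks / 2 + 3;
 *
 *		assert(bdimmsize == 28);
 *		assert(((1u << bdimmsize) >> 20) == 256);  // MB
 *		return 0;
 *	}
 */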
1154
1155
1156static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1157{
1158 u32 data, spd0;
1159 int error, i;
1160 void __iomem *mmio = pe->mmio_base;
1161
1162 /* hard-code chip #0 */
1163 mmio += PDC_CHIP0_OFS;
1164
1165 /*
1166 Set To Default : DIMM Module Global Control Register (0x022259F1)
1167 DIMM Arbitration Disable (bit 20)
1168	  DIMM Data/Control Output Driving Selection (bits 12 - 15)
1169 Refresh Enable (bit 17)
1170 */
1171
1172 data = 0x022259F1;
1173 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1174 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1175
1176	/* Turn ECC on if the SPD module-type byte reports ECC (0x02) */
1177 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1178 PDC_DIMM_SPD_TYPE, &spd0);
1179 if (spd0 == 0x02) {
1180 data |= (0x01 << 16);
1181 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1182 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1183 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1184 }
1185
1186 /* DIMM Initialization Select/Enable (bit 18/19) */
1187 data &= (~(1<<18));
1188 data |= (1<<19);
1189 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1190
1191 error = 1;
1192 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1193 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1194 if (!(data & (1<<19))) {
1195 error = 0;
1196 break;
1197 }
1198 msleep(i*100);
1199 }
1200 return error;
1201}
1202
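The escalating msleep(i * 100) in the loop above sleeps at most
100 + 200 + ... + 1000 ms = 5500 ms before giving up, which is the
"~5 secs" the comment refers to.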
1203
1204static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1205{
1206 int speed, size, length;
1207 u32 addr, spd0, pci_status;
1208 u32 tmp = 0;
1209 u32 time_period = 0;
1210 u32 tcount = 0;
1211 u32 ticks = 0;
1212 u32 clock = 0;
1213 u32 fparam = 0;
1214 void __iomem *mmio = pe->mmio_base;
1215
1216 /* hard-code chip #0 */
1217 mmio += PDC_CHIP0_OFS;
1218
1219 /* Initialize PLL based upon PCI Bus Frequency */
1220
1221 /* Initialize Time Period Register */
1222 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1223 time_period = readl(mmio + PDC_TIME_PERIOD);
1224 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1225
1226 /* Enable timer */
1227 writel(0x00001a0, mmio + PDC_TIME_CONTROL);
1228 readl(mmio + PDC_TIME_CONTROL);
1229
1230 /* Wait 3 seconds */
1231 msleep(3000);
1232
1233 /*
1234 While the timer is enabled, the counter is decremented by one
1235 every internal clock cycle.
1236 */
1237
1238 tcount = readl(mmio + PDC_TIME_COUNTER);
1239 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1240
1241 /*
1242 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1243 register should be >= (0xffffffff - 3x10^8).
1244 */
1245 if (tcount >= PCI_X_TCOUNT) {
1246 ticks = (time_period - tcount);
1247 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1248
1249 clock = (ticks / 300000);
1250 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1251
1252 clock = (clock * 33);
1253 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1254
1255 /* PLL F Param (bit 22:16) */
1256 fparam = (1400000 / clock) - 2;
1257 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1258
1259 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1260 pci_status = (0x8a001824 | (fparam << 16));
1261 } else
1262 pci_status = PCI_PLL_INIT;
1263
1264 /* Initialize PLL. */
1265 VPRINTK("pci_status: 0x%x\n", pci_status);
1266 writel(pci_status, mmio + PDC_CTL_STATUS);
1267 readl(mmio + PDC_CTL_STATUS);
1268
1269 /*
1270 Read SPD of DIMM by I2C interface,
1271 and program the DIMM Module Controller.
1272 */
1273 if (!(speed = pdc20621_detect_dimm(pe))) {
1274 printk(KERN_ERR "Detect Local DIMM Fail\n");
1275 return 1; /* DIMM error */
1276 }
1277 VPRINTK("Local DIMM Speed = %d\n", speed);
1278
1279 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1280 size = pdc20621_prog_dimm0(pe);
1281 VPRINTK("Local DIMM Size = %dMB\n",size);
1282
1283 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1284 if (pdc20621_prog_dimm_global(pe)) {
1285 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1286 return 1;
1287 }
1288
1289#ifdef ATA_VERBOSE_DEBUG
1290 {
1291 u8 test_pattern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1292 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
1293 '1','.','1','0',
1294 '9','8','0','3','1','6','1','2',0,0};
1295 u8 test_pattern2[40] = {0};
1296
1297 pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x10040, 40);
1298 pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x40, 40);
1299
1300 pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x10040, 40);
1301 pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1302 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1303 test_pattern2[1], &(test_pattern2[2]));
1304 pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x10040,
1305 40);
1306 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1307 test_pattern2[1], &(test_pattern2[2]));
1308
1309 pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x40, 40);
1310 pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1311 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1312 test_pattern2[1], &(test_pattern2[2]));
1313 }
1314#endif
1315
1316 /* ECC initialization. */
1317
1318 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1319 PDC_DIMM_SPD_TYPE, &spd0);
1320 if (spd0 == 0x02) {
1321 VPRINTK("Start ECC initialization\n");
1322 addr = 0;
1323 length = size * 1024 * 1024;
1324 while (addr < length) {
1325 pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
1326 sizeof(u32));
1327 addr += sizeof(u32);
1328 }
1329 VPRINTK("Finish ECC initialization\n");
1330 }
1331 return 0;
1332}
1333
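To make the PLL arithmetic above concrete, assume for illustration that the
counter measured an internal clock of 66 MHz on the PCI-X path:

	ticks      = 3 s * 66 MHz               = 198,000,000
	clock      = 198,000,000 / 300,000      = 660      (10 * MHz)
	clock      = 660 * 33                   = 21,780
	fparam     = (1,400,000 / 21,780) - 2   = 62
	pci_status = 0x8a001824 | (62 << 16)

62 fits in the 7-bit F field (bits 22:16); on the plain PCI path the canned
PCI_PLL_INIT value is used instead.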
1334
1335static void pdc_20621_init(struct ata_probe_ent *pe)
1336{
1337 u32 tmp;
1338 void __iomem *mmio = pe->mmio_base;
1339
1340 /* hard-code chip #0 */
1341 mmio += PDC_CHIP0_OFS;
1342
1343 /*
1344 * Select page 0x40 for our 32k DIMM window
1345 */
1346 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1347 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1348 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1349
1350 /*
1351 * Reset Host DMA
1352 */
1353 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1354 tmp |= PDC_RESET;
1355 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1356 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1357
1358 udelay(10);
1359
1360 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1361 tmp &= ~PDC_RESET;
1362 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1363 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1364}
1365
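The readl() after each writel() above is the usual posted-write flush: PCI
bridges may buffer MMIO writes, and reading any register back from the same
device forces them out, so the udelay() really times the reset pulse. The
generic shape of the idiom (a sketch, not driver code):

	writel(val, reg);
	(void) readl(reg);	/* flush the posted write to the device */
	udelay(10);		/* the delay now starts after the write landed */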
1366static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1367{
1368 static int printed_version;
1369 struct ata_probe_ent *probe_ent = NULL;
1370 unsigned long base;
1371 void __iomem *mmio_base;
1372 void __iomem *dimm_mmio = NULL;
1373 struct pdc_host_priv *hpriv = NULL;
1374 unsigned int board_idx = (unsigned int) ent->driver_data;
1375 int pci_dev_busy = 0;
1376 int rc;
1377
1378 if (!printed_version++)
1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1380
1381 rc = pci_enable_device(pdev);
1382 if (rc)
1383 return rc;
1384
1385 rc = pci_request_regions(pdev, DRV_NAME);
1386 if (rc) {
1387 pci_dev_busy = 1;
1388 goto err_out;
1389 }
1390
1391 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1392 if (rc)
1393 goto err_out_regions;
1394 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1395 if (rc)
1396 goto err_out_regions;
1397
1398 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1399 if (probe_ent == NULL) {
1400 rc = -ENOMEM;
1401 goto err_out_regions;
1402 }
1403
1404 memset(probe_ent, 0, sizeof(*probe_ent));
1405 probe_ent->dev = pci_dev_to_dev(pdev);
1406 INIT_LIST_HEAD(&probe_ent->node);
1407
1408 mmio_base = pci_iomap(pdev, 3, 0);
1409 if (mmio_base == NULL) {
1410 rc = -ENOMEM;
1411 goto err_out_free_ent;
1412 }
1413 base = (unsigned long) mmio_base;
1414
1415 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1416 if (!hpriv) {
1417 rc = -ENOMEM;
1418 goto err_out_iounmap;
1419 }
1420 memset(hpriv, 0, sizeof(*hpriv));
1421
1422 dimm_mmio = pci_iomap(pdev, 4, 0);
1423 if (!dimm_mmio) {
1424 kfree(hpriv);
1425 rc = -ENOMEM;
1426 goto err_out_iounmap;
1427 }
1428
1429 hpriv->dimm_mmio = dimm_mmio;
1430
1431 probe_ent->sht = pdc_port_info[board_idx].sht;
1432 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
1433 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
1434 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
1435 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
1436 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
1437
1438 probe_ent->irq = pdev->irq;
1439 probe_ent->irq_flags = SA_SHIRQ;
1440 probe_ent->mmio_base = mmio_base;
1441
1442 probe_ent->private_data = hpriv;
1443 base += PDC_CHIP0_OFS;
1444
1445 probe_ent->n_ports = 4;
1446 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
1447 pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
1448 pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
1449 pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
1450
1451 pci_set_master(pdev);
1452
1453 /* initialize adapter */
1454 /* initialize local dimm */
1455 if (pdc20621_dimm_init(probe_ent)) {
1456 rc = -ENOMEM;
1457 goto err_out_iounmap_dimm;
1458 }
1459 pdc_20621_init(probe_ent);
1460
1461 /* FIXME: check ata_device_add return value */
1462 ata_device_add(probe_ent);
1463 kfree(probe_ent);
1464
1465 return 0;
1466
1467err_out_iounmap_dimm: /* only get to this label if 20621 */
1468 kfree(hpriv);
1469 pci_iounmap(pdev, dimm_mmio);
1470err_out_iounmap:
1471 pci_iounmap(pdev, mmio_base);
1472err_out_free_ent:
1473 kfree(probe_ent);
1474err_out_regions:
1475 pci_release_regions(pdev);
1476err_out:
1477 if (!pci_dev_busy)
1478 pci_disable_device(pdev);
1479 return rc;
1480}
1481
1482
1483static int __init pdc_sata_init(void)
1484{
1485 return pci_module_init(&pdc_sata_pci_driver);
1486}
1487
1488
1489static void __exit pdc_sata_exit(void)
1490{
1491 pci_unregister_driver(&pdc_sata_pci_driver);
1492}
1493
1494
1495MODULE_AUTHOR("Jeff Garzik");
1496MODULE_DESCRIPTION("Promise SATA low-level driver");
1497MODULE_LICENSE("GPL");
1498MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1499MODULE_VERSION(DRV_VERSION);
1500
1501module_init(pdc_sata_init);
1502module_exit(pdc_sata_exit);
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
deleted file mode 100644
index f668c997e9af..000000000000
--- a/drivers/scsi/sata_uli.c
+++ /dev/null
@@ -1,301 +0,0 @@
1/*
2 * sata_uli.c - ULi Electronics SATA
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 *
20 * libata documentation is available via 'make {ps|pdf}docs',
21 * as Documentation/DocBook/libata.*
22 *
23 * Hardware documentation available under NDA.
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/device.h>
36#include <scsi/scsi_host.h>
37#include <linux/libata.h>
38
39#define DRV_NAME "sata_uli"
40#define DRV_VERSION "0.6"
41
42enum {
43 uli_5289 = 0,
44 uli_5287 = 1,
45 uli_5281 = 2,
46
47 uli_max_ports = 4,
48
49 /* PCI configuration registers */
50 ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
51 ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
52 ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
53 ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
54};
55
56struct uli_priv {
57 unsigned int scr_cfg_addr[uli_max_ports];
58};
59
60static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
61static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
62static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
63
64static const struct pci_device_id uli_pci_tbl[] = {
65 { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
66 { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
67 { PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 },
68 { } /* terminate list */
69};
70
71
72static struct pci_driver uli_pci_driver = {
73 .name = DRV_NAME,
74 .id_table = uli_pci_tbl,
75 .probe = uli_init_one,
76 .remove = ata_pci_remove_one,
77};
78
79static struct scsi_host_template uli_sht = {
80 .module = THIS_MODULE,
81 .name = DRV_NAME,
82 .ioctl = ata_scsi_ioctl,
83 .queuecommand = ata_scsi_queuecmd,
84 .can_queue = ATA_DEF_QUEUE,
85 .this_id = ATA_SHT_THIS_ID,
86 .sg_tablesize = LIBATA_MAX_PRD,
87 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
88 .emulated = ATA_SHT_EMULATED,
89 .use_clustering = ATA_SHT_USE_CLUSTERING,
90 .proc_name = DRV_NAME,
91 .dma_boundary = ATA_DMA_BOUNDARY,
92 .slave_configure = ata_scsi_slave_config,
93 .slave_destroy = ata_scsi_slave_destroy,
94 .bios_param = ata_std_bios_param,
95};
96
97static const struct ata_port_operations uli_ops = {
98 .port_disable = ata_port_disable,
99
100 .tf_load = ata_tf_load,
101 .tf_read = ata_tf_read,
102 .check_status = ata_check_status,
103 .exec_command = ata_exec_command,
104 .dev_select = ata_std_dev_select,
105
106 .bmdma_setup = ata_bmdma_setup,
107 .bmdma_start = ata_bmdma_start,
108 .bmdma_stop = ata_bmdma_stop,
109 .bmdma_status = ata_bmdma_status,
110 .qc_prep = ata_qc_prep,
111 .qc_issue = ata_qc_issue_prot,
112 .data_xfer = ata_pio_data_xfer,
113
114 .freeze = ata_bmdma_freeze,
115 .thaw = ata_bmdma_thaw,
116 .error_handler = ata_bmdma_error_handler,
117 .post_internal_cmd = ata_bmdma_post_internal_cmd,
118
119 .irq_handler = ata_interrupt,
120 .irq_clear = ata_bmdma_irq_clear,
121
122 .scr_read = uli_scr_read,
123 .scr_write = uli_scr_write,
124
125 .port_start = ata_port_start,
126 .port_stop = ata_port_stop,
127 .host_stop = ata_host_stop,
128};
129
130static struct ata_port_info uli_port_info = {
131 .sht = &uli_sht,
132 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
133 .pio_mask = 0x1f, /* pio0-4 */
134 .udma_mask = 0x7f, /* udma0-6 */
135 .port_ops = &uli_ops,
136};
137
138
139MODULE_AUTHOR("Peer Chen");
140MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
141MODULE_LICENSE("GPL");
142MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
143MODULE_VERSION(DRV_VERSION);
144
145static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
146{
147 struct uli_priv *hpriv = ap->host_set->private_data;
148 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
149}
150
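Worked example: for a ULI5287, the probe code below sets
scr_cfg_addr[3] = 0x90 + 0x10 * 5 = 0xe0, so reading SCR_CONTROL (sc_reg 2)
on port 3 accesses PCI config dword 0xe0 + 4 * 2 = 0xe8.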
151static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
152{
153 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
154 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
155 u32 val;
156
157 pci_read_config_dword(pdev, cfg_addr, &val);
158 return val;
159}
160
161static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
162{
163 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
164 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
165
166 pci_write_config_dword(pdev, cfg_addr, val);
167}
168
169static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
170{
171 if (sc_reg > SCR_CONTROL)
172 return 0xffffffffU;
173
174 return uli_scr_cfg_read(ap, sc_reg);
175}
176
177static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
178{
179 if (sc_reg > SCR_CONTROL) /* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
180 return;
181
182 uli_scr_cfg_write(ap, sc_reg, val);
183}
184
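For context, this is roughly how a caller could interpret the SStatus value
these accessors expose. The DET field decode comes from the SATA
specification; the helper itself is a hypothetical sketch, not part of this
file:

	static int uli_link_established(struct ata_port *ap)
	{
		u32 sstatus = uli_scr_read(ap, SCR_STATUS);

		/* DET (bits 3:0): 3 = device present, phy comm established */
		return (sstatus & 0xf) == 0x3;
	}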
185static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
186{
187 static int printed_version;
188 struct ata_probe_ent *probe_ent;
189 struct ata_port_info *ppi;
190 int rc;
191 unsigned int board_idx = (unsigned int) ent->driver_data;
192 int pci_dev_busy = 0;
193 struct uli_priv *hpriv;
194
195 if (!printed_version++)
196 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
197
198 rc = pci_enable_device(pdev);
199 if (rc)
200 return rc;
201
202 rc = pci_request_regions(pdev, DRV_NAME);
203 if (rc) {
204 pci_dev_busy = 1;
205 goto err_out;
206 }
207
208 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
209 if (rc)
210 goto err_out_regions;
211 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
212 if (rc)
213 goto err_out_regions;
214
215 ppi = &uli_port_info;
216 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
217 if (!probe_ent) {
218 rc = -ENOMEM;
219 goto err_out_regions;
220 }
221
222 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
223 if (!hpriv) {
224 rc = -ENOMEM;
225 goto err_out_probe_ent;
226 }
227
228 probe_ent->private_data = hpriv;
229
230 switch (board_idx) {
231 case uli_5287:
232 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
233 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
234 probe_ent->n_ports = 4;
235
236 probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
237 probe_ent->port[2].altstatus_addr =
238 probe_ent->port[2].ctl_addr =
239 (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
240 probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
241 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
242
243 probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
244 probe_ent->port[3].altstatus_addr =
245 probe_ent->port[3].ctl_addr =
246 (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
247 probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
248 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
249
250 ata_std_ports(&probe_ent->port[2]);
251 ata_std_ports(&probe_ent->port[3]);
252 break;
253
254 case uli_5289:
255 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
256 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
257 break;
258
259 case uli_5281:
260 hpriv->scr_cfg_addr[0] = ULI5281_BASE;
261 hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
262 break;
263
264 default:
265 BUG();
266 break;
267 }
268
269 pci_set_master(pdev);
270 pci_intx(pdev, 1);
271
272 /* FIXME: check ata_device_add return value */
273 ata_device_add(probe_ent);
274 kfree(probe_ent);
275
276 return 0;
277
278err_out_probe_ent:
279 kfree(probe_ent);
280err_out_regions:
281 pci_release_regions(pdev);
282err_out:
283 if (!pci_dev_busy)
284 pci_disable_device(pdev);
285 return rc;
286
287}
288
289static int __init uli_init(void)
290{
291 return pci_module_init(&uli_pci_driver);
292}
293
294static void __exit uli_exit(void)
295{
296 pci_unregister_driver(&uli_pci_driver);
297}
298
299
300module_init(uli_init);
301module_exit(uli_exit);
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
deleted file mode 100644
index 322890b400a6..000000000000
--- a/drivers/scsi/sata_via.c
+++ /dev/null
@@ -1,394 +0,0 @@
1/*
2 * sata_via.c - VIA Serial ATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available under NDA.
31 *
32 *
33 * To-do list:
34 * - VT6421 PATA support
35 *
36 */
37
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/init.h>
42#include <linux/blkdev.h>
43#include <linux/delay.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47#include <asm/io.h>
48
49#define DRV_NAME "sata_via"
50#define DRV_VERSION "1.2"
51
52enum board_ids_enum {
53 vt6420,
54 vt6421,
55};
56
57enum {
58 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
59 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
60 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
61 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
62
63 PORT0 = (1 << 1),
64 PORT1 = (1 << 0),
65 ALL_PORTS = PORT0 | PORT1,
66 N_PORTS = 2,
67
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69
70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
71 SATA_2DEV = (1 << 5), /* SATA is master/slave */
72};
73
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77
78static const struct pci_device_id svia_pci_tbl[] = {
79 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
80 { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
81
82 { } /* terminate list */
83};
84
85static struct pci_driver svia_pci_driver = {
86 .name = DRV_NAME,
87 .id_table = svia_pci_tbl,
88 .probe = svia_init_one,
89 .remove = ata_pci_remove_one,
90};
91
92static struct scsi_host_template svia_sht = {
93 .module = THIS_MODULE,
94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd,
97 .can_queue = ATA_DEF_QUEUE,
98 .this_id = ATA_SHT_THIS_ID,
99 .sg_tablesize = LIBATA_MAX_PRD,
100 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
101 .emulated = ATA_SHT_EMULATED,
102 .use_clustering = ATA_SHT_USE_CLUSTERING,
103 .proc_name = DRV_NAME,
104 .dma_boundary = ATA_DMA_BOUNDARY,
105 .slave_configure = ata_scsi_slave_config,
106 .slave_destroy = ata_scsi_slave_destroy,
107 .bios_param = ata_std_bios_param,
108};
109
110static const struct ata_port_operations svia_sata_ops = {
111 .port_disable = ata_port_disable,
112
113 .tf_load = ata_tf_load,
114 .tf_read = ata_tf_read,
115 .check_status = ata_check_status,
116 .exec_command = ata_exec_command,
117 .dev_select = ata_std_dev_select,
118
119 .bmdma_setup = ata_bmdma_setup,
120 .bmdma_start = ata_bmdma_start,
121 .bmdma_stop = ata_bmdma_stop,
122 .bmdma_status = ata_bmdma_status,
123
124 .qc_prep = ata_qc_prep,
125 .qc_issue = ata_qc_issue_prot,
126 .data_xfer = ata_pio_data_xfer,
127
128 .freeze = ata_bmdma_freeze,
129 .thaw = ata_bmdma_thaw,
130 .error_handler = ata_bmdma_error_handler,
131 .post_internal_cmd = ata_bmdma_post_internal_cmd,
132
133 .irq_handler = ata_interrupt,
134 .irq_clear = ata_bmdma_irq_clear,
135
136 .scr_read = svia_scr_read,
137 .scr_write = svia_scr_write,
138
139 .port_start = ata_port_start,
140 .port_stop = ata_port_stop,
141 .host_stop = ata_host_stop,
142};
143
144static struct ata_port_info svia_port_info = {
145 .sht = &svia_sht,
146 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
147 .pio_mask = 0x1f,
148 .mwdma_mask = 0x07,
149 .udma_mask = 0x7f,
150 .port_ops = &svia_sata_ops,
151};
152
153MODULE_AUTHOR("Jeff Garzik");
154MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
155MODULE_LICENSE("GPL");
156MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
157MODULE_VERSION(DRV_VERSION);
158
159static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
160{
161 if (sc_reg > SCR_CONTROL)
162 return 0xffffffffU;
163 return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
164}
165
166static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
167{
168 if (sc_reg > SCR_CONTROL)
169 return;
170 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
171}
172
173static const unsigned int svia_bar_sizes[] = {
174 8, 4, 8, 4, 16, 256
175};
176
177static const unsigned int vt6421_bar_sizes[] = {
178 16, 16, 16, 16, 32, 128
179};
180
181static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
182{
183 return addr + (port * 128);
184}
185
186static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
187{
188 return addr + (port * 64);
189}
190
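Worked example of the two strides: port 1's SCR block sits at BAR5 + 128 on a
vt6420 (svia_scr_addr) but at BAR5 + 64 on a vt6421 (vt6421_scr_addr); the
register layout is the same, only the per-port spacing differs.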
191static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
192 struct pci_dev *pdev,
193 unsigned int port)
194{
195 unsigned long reg_addr = pci_resource_start(pdev, port);
196 unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
197 unsigned long scr_addr;
198
199 probe_ent->port[port].cmd_addr = reg_addr;
200 probe_ent->port[port].altstatus_addr =
201 probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
202 probe_ent->port[port].bmdma_addr = bmdma_addr;
203
204 scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
205 probe_ent->port[port].scr_addr = scr_addr;
206
207 ata_std_ports(&probe_ent->port[port]);
208}
209
210static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
211{
212 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info;
214
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent)
217 return NULL;
218
219 probe_ent->port[0].scr_addr =
220 svia_scr_addr(pci_resource_start(pdev, 5), 0);
221 probe_ent->port[1].scr_addr =
222 svia_scr_addr(pci_resource_start(pdev, 5), 1);
223
224 return probe_ent;
225}
226
227static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
228{
229 struct ata_probe_ent *probe_ent;
230 unsigned int i;
231
232 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
233 if (!probe_ent)
234 return NULL;
235
236 memset(probe_ent, 0, sizeof(*probe_ent));
237 probe_ent->dev = pci_dev_to_dev(pdev);
238 INIT_LIST_HEAD(&probe_ent->node);
239
240 probe_ent->sht = &svia_sht;
241 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
242 probe_ent->port_ops = &svia_sata_ops;
243 probe_ent->n_ports = N_PORTS;
244 probe_ent->irq = pdev->irq;
245 probe_ent->irq_flags = SA_SHIRQ;
246 probe_ent->pio_mask = 0x1f;
247 probe_ent->mwdma_mask = 0x07;
248 probe_ent->udma_mask = 0x7f;
249
250 for (i = 0; i < N_PORTS; i++)
251 vt6421_init_addrs(probe_ent, pdev, i);
252
253 return probe_ent;
254}
255
256static void svia_configure(struct pci_dev *pdev)
257{
258 u8 tmp8;
259
260 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
261 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
262 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
263
264 /* make sure SATA channels are enabled */
265 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
266 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
267 dev_printk(KERN_DEBUG, &pdev->dev,
268 "enabling SATA channels (0x%x)\n",
269 (int) tmp8);
270 tmp8 |= ALL_PORTS;
271 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
272 }
273
274 /* make sure interrupts for each channel are sent to us */
275 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
276 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
277 dev_printk(KERN_DEBUG, &pdev->dev,
278 "enabling SATA channel interrupts (0x%x)\n",
279 (int) tmp8);
280 tmp8 |= ALL_PORTS;
281 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
282 }
283
284 /* make sure native mode is enabled */
285 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
286 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
287 dev_printk(KERN_DEBUG, &pdev->dev,
288 "enabling SATA channel native mode (0x%x)\n",
289 (int) tmp8);
290 tmp8 |= NATIVE_MODE_ALL;
291 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
292 }
293}
294
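All three blocks above are the same read-modify-write idiom on a PCI config
byte. Factored out as a sketch (a hypothetical helper, not something this
driver defines):

	static void svia_config_set_bits(struct pci_dev *pdev, int where, u8 bits)
	{
		u8 tmp8;

		pci_read_config_byte(pdev, where, &tmp8);
		if ((tmp8 & bits) != bits) {
			tmp8 |= bits;	/* leave the other bits untouched */
			pci_write_config_byte(pdev, where, tmp8);
		}
	}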
295static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
296{
297 static int printed_version;
298 unsigned int i;
299 int rc;
300 struct ata_probe_ent *probe_ent;
301 int board_id = (int) ent->driver_data;
302 const int *bar_sizes;
303 int pci_dev_busy = 0;
304 u8 tmp8;
305
306 if (!printed_version++)
307 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
308
309 rc = pci_enable_device(pdev);
310 if (rc)
311 return rc;
312
313 rc = pci_request_regions(pdev, DRV_NAME);
314 if (rc) {
315 pci_dev_busy = 1;
316 goto err_out;
317 }
318
319 if (board_id == vt6420) {
320 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
321 if (tmp8 & SATA_2DEV) {
322 dev_printk(KERN_ERR, &pdev->dev,
323 "SATA master/slave not supported (0x%x)\n",
324 (int) tmp8);
325 rc = -EIO;
326 goto err_out_regions;
327 }
328
329 bar_sizes = &svia_bar_sizes[0];
330 } else {
331 bar_sizes = &vt6421_bar_sizes[0];
332 }
333
334 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
335 if ((pci_resource_start(pdev, i) == 0) ||
336 (pci_resource_len(pdev, i) < bar_sizes[i])) {
337 dev_printk(KERN_ERR, &pdev->dev,
338 "invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n",
339 i,
340 pci_resource_start(pdev, i),
341 pci_resource_len(pdev, i));
342 rc = -ENODEV;
343 goto err_out_regions;
344 }
345
346 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
347 if (rc)
348 goto err_out_regions;
349 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
350 if (rc)
351 goto err_out_regions;
352
353 if (board_id == vt6420)
354 probe_ent = vt6420_init_probe_ent(pdev);
355 else
356 probe_ent = vt6421_init_probe_ent(pdev);
357
358 if (!probe_ent) {
359 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
360 rc = -ENOMEM;
361 goto err_out_regions;
362 }
363
364 svia_configure(pdev);
365
366 pci_set_master(pdev);
367
368 /* FIXME: check ata_device_add return value */
369 ata_device_add(probe_ent);
370 kfree(probe_ent);
371
372 return 0;
373
374err_out_regions:
375 pci_release_regions(pdev);
376err_out:
377 if (!pci_dev_busy)
378 pci_disable_device(pdev);
379 return rc;
380}
381
382static int __init svia_init(void)
383{
384 return pci_module_init(&svia_pci_driver);
385}
386
387static void __exit svia_exit(void)
388{
389 pci_unregister_driver(&svia_pci_driver);
390}
391
392module_init(svia_init);
393module_exit(svia_exit);
394
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
deleted file mode 100644
index 6d0c4f18e652..000000000000
--- a/drivers/scsi/sata_vsc.c
+++ /dev/null
@@ -1,486 +0,0 @@
1/*
2 * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
3 *
4 * Maintained by: Jeremy Higdon @ SGI
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 SGI
9 *
10 * Bits from Jeff Garzik, Copyright RedHat, Inc.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING. If not, write to
25 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *
28 * libata documentation is available via 'make {ps|pdf}docs',
29 * as Documentation/DocBook/libata.*
30 *
31 * Vitesse hardware documentation presumably available under NDA.
32 * Intel 31244 (same hardware interface) documentation presumably
33 * available from http://developer.intel.com/
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h>
45#include <linux/device.h>
46#include <scsi/scsi_host.h>
47#include <linux/libata.h>
48
49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "1.2"
51
52enum {
53 /* Interrupt register offsets (from chip base address) */
54 VSC_SATA_INT_STAT_OFFSET = 0x00,
55 VSC_SATA_INT_MASK_OFFSET = 0x04,
56
57 /* Taskfile registers offsets */
58 VSC_SATA_TF_CMD_OFFSET = 0x00,
59 VSC_SATA_TF_DATA_OFFSET = 0x00,
60 VSC_SATA_TF_ERROR_OFFSET = 0x04,
61 VSC_SATA_TF_FEATURE_OFFSET = 0x06,
62 VSC_SATA_TF_NSECT_OFFSET = 0x08,
63 VSC_SATA_TF_LBAL_OFFSET = 0x0c,
64 VSC_SATA_TF_LBAM_OFFSET = 0x10,
65 VSC_SATA_TF_LBAH_OFFSET = 0x14,
66 VSC_SATA_TF_DEVICE_OFFSET = 0x18,
67 VSC_SATA_TF_STATUS_OFFSET = 0x1c,
68 VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
69 VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
70 VSC_SATA_TF_CTL_OFFSET = 0x29,
71
72 /* DMA base */
73 VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
74 VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
75 VSC_SATA_DMA_CMD_OFFSET = 0x70,
76
77 /* SCRs base */
78 VSC_SATA_SCR_STATUS_OFFSET = 0x100,
79 VSC_SATA_SCR_ERROR_OFFSET = 0x104,
80 VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
81
82 /* Port stride */
83 VSC_SATA_PORT_OFFSET = 0x200,
84
85 /* Error interrupt status bit offsets */
86 VSC_SATA_INT_ERROR_CRC = 0x40,
87 VSC_SATA_INT_ERROR_T = 0x20,
88 VSC_SATA_INT_ERROR_P = 0x10,
89 VSC_SATA_INT_ERROR_R = 0x8,
90 VSC_SATA_INT_ERROR_E = 0x4,
91 VSC_SATA_INT_ERROR_M = 0x2,
92 VSC_SATA_INT_PHY_CHANGE = 0x1,
93 VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
94 VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
95 VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
96 VSC_SATA_INT_PHY_CHANGE),
97};
98
99
100#define is_vsc_sata_int_err(port_idx, int_status) \
101 (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))
102
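VSC_SATA_INT_ERROR ORs the individual error bits together to 0x7f, and each
port owns one byte of the 32-bit status register, so for port 2 the macro
above tests int_status against 0x7f << 16 = 0x007f0000.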
103
104static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
105{
106 if (sc_reg > SCR_CONTROL)
107 return 0xffffffffU;
108 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
109}
110
111
112static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
113 u32 val)
114{
115 if (sc_reg > SCR_CONTROL)
116 return;
117 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
118}
119
120
121static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
122{
123 void __iomem *mask_addr;
124 u8 mask;
125
126 mask_addr = ap->host_set->mmio_base +
127 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
128 mask = readb(mask_addr);
129 if (ctl & ATA_NIEN)
130 mask |= 0x80;
131 else
132 mask &= 0x7F;
133 writeb(mask, mask_addr);
134}
135
136
137static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
138{
139 struct ata_ioports *ioaddr = &ap->ioaddr;
140 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
141
142 /*
143 * The only thing the ctl register is used for is SRST.
144 * That is not enabled or disabled via tf_load.
145 * However, if ATA_NIEN is changed, then we need to change the interrupt register.
146 */
147 if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
148 ap->last_ctl = tf->ctl;
149 vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
150 }
151 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
152 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
153 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
154 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
155 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
156 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
157 } else if (is_addr) {
158 writew(tf->feature, ioaddr->feature_addr);
159 writew(tf->nsect, ioaddr->nsect_addr);
160 writew(tf->lbal, ioaddr->lbal_addr);
161 writew(tf->lbam, ioaddr->lbam_addr);
162 writew(tf->lbah, ioaddr->lbah_addr);
163 }
164
165 if (tf->flags & ATA_TFLAG_DEVICE)
166 writeb(tf->device, ioaddr->device_addr);
167
168 ata_wait_idle(ap);
169}
170
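Because the chip maps each taskfile register pair 16 bits wide, a single
writew() carries both the current and the hob byte. Worked example with an
illustrative 48-bit LBA of 0x123456789abc: tf->lbal = 0xbc and
tf->hob_lbal = 0x56, so the lbal writew() above stores 0x56bc to lbal_addr
in one cycle.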
171
172static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
173{
174 struct ata_ioports *ioaddr = &ap->ioaddr;
175 u16 nsect, lbal, lbam, lbah, feature;
176
177 tf->command = ata_check_status(ap);
178 tf->device = readw(ioaddr->device_addr);
179 feature = readw(ioaddr->error_addr);
180 nsect = readw(ioaddr->nsect_addr);
181 lbal = readw(ioaddr->lbal_addr);
182 lbam = readw(ioaddr->lbam_addr);
183 lbah = readw(ioaddr->lbah_addr);
184
185 tf->feature = feature;
186 tf->nsect = nsect;
187 tf->lbal = lbal;
188 tf->lbam = lbam;
189 tf->lbah = lbah;
190
191 if (tf->flags & ATA_TFLAG_LBA48) {
192 tf->hob_feature = feature >> 8;
193 tf->hob_nsect = nsect >> 8;
194 tf->hob_lbal = lbal >> 8;
195 tf->hob_lbam = lbam >> 8;
196 tf->hob_lbah = lbah >> 8;
197 }
198}
199
200
201/*
202 * vsc_sata_interrupt
203 *
204 * Read the interrupt status register and service the ports that have interrupts pending.
205 */
206static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
207 struct pt_regs *regs)
208{
209 struct ata_host_set *host_set = dev_instance;
210 unsigned int i;
211 unsigned int handled = 0;
212 u32 int_status;
213
214 spin_lock(&host_set->lock);
215
216 int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
217
218 for (i = 0; i < host_set->n_ports; i++) {
219 if (int_status & ((u32) 0xFF << (8 * i))) {
220 struct ata_port *ap;
221
222 ap = host_set->ports[i];
223
224 if (is_vsc_sata_int_err(i, int_status)) {
225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228 if (ap) vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
233 struct ata_queued_cmd *qc;
234
235 qc = ata_qc_from_tag(ap, ap->active_tag);
236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
237 handled += ata_host_intr(ap, qc);
238 else if (is_vsc_sata_int_err(i, int_status)) {
239 /*
240 * On some chips (i.e. Intel 31244), an error
241 * interrupt will sneak in at initialization
242 * time (phy state changes). Clearing the SCR
243 * error register is not required, but it prevents
244 * the phy state change interrupts from recurring
245 * later.
246 */
247 u32 err_status;
248 err_status = vsc_sata_scr_read(ap, SCR_ERROR);
249 printk(KERN_DEBUG "%s: clearing interrupt, "
250 "status %x; sata err status %x\n",
251 __FUNCTION__,
252 int_status, err_status);
253 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
254 /* Clear interrupt status */
255 ata_chk_status(ap);
256 handled++;
257 }
258 }
259 }
260 }
261
262 spin_unlock(&host_set->lock);
263
264 return IRQ_RETVAL(handled);
265}
266
267
268static struct scsi_host_template vsc_sata_sht = {
269 .module = THIS_MODULE,
270 .name = DRV_NAME,
271 .ioctl = ata_scsi_ioctl,
272 .queuecommand = ata_scsi_queuecmd,
273 .can_queue = ATA_DEF_QUEUE,
274 .this_id = ATA_SHT_THIS_ID,
275 .sg_tablesize = LIBATA_MAX_PRD,
276 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
277 .emulated = ATA_SHT_EMULATED,
278 .use_clustering = ATA_SHT_USE_CLUSTERING,
279 .proc_name = DRV_NAME,
280 .dma_boundary = ATA_DMA_BOUNDARY,
281 .slave_configure = ata_scsi_slave_config,
282 .slave_destroy = ata_scsi_slave_destroy,
283 .bios_param = ata_std_bios_param,
284};
285
286
287static const struct ata_port_operations vsc_sata_ops = {
288 .port_disable = ata_port_disable,
289 .tf_load = vsc_sata_tf_load,
290 .tf_read = vsc_sata_tf_read,
291 .exec_command = ata_exec_command,
292 .check_status = ata_check_status,
293 .dev_select = ata_std_dev_select,
294 .bmdma_setup = ata_bmdma_setup,
295 .bmdma_start = ata_bmdma_start,
296 .bmdma_stop = ata_bmdma_stop,
297 .bmdma_status = ata_bmdma_status,
298 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_pio_data_xfer,
301 .freeze = ata_bmdma_freeze,
302 .thaw = ata_bmdma_thaw,
303 .error_handler = ata_bmdma_error_handler,
304 .post_internal_cmd = ata_bmdma_post_internal_cmd,
305 .irq_handler = vsc_sata_interrupt,
306 .irq_clear = ata_bmdma_irq_clear,
307 .scr_read = vsc_sata_scr_read,
308 .scr_write = vsc_sata_scr_write,
309 .port_start = ata_port_start,
310 .port_stop = ata_port_stop,
311 .host_stop = ata_pci_host_stop,
312};
313
314static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
315{
316 port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
317 port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
318 port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
319 port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
320 port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
321 port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
322 port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
323 port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
324 port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
325 port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
326 port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
327 port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
328 port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
329 port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
330 port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
331 writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
332 writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
333}
334
335
336static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
337{
338 static int printed_version;
339 struct ata_probe_ent *probe_ent = NULL;
340 unsigned long base;
341 int pci_dev_busy = 0;
342 void __iomem *mmio_base;
343 int rc;
344
345 if (!printed_version++)
346 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
347
348 rc = pci_enable_device(pdev);
349 if (rc)
350 return rc;
351
352 /*
353 * Check that we have the needed resource mapped.
354 */
355 if (pci_resource_len(pdev, 0) == 0) {
356 rc = -ENODEV;
357 goto err_out;
358 }
359
360 rc = pci_request_regions(pdev, DRV_NAME);
361 if (rc) {
362 pci_dev_busy = 1;
363 goto err_out;
364 }
365
366 /*
367 * Use 32 bit DMA mask, because 64 bit address support is poor.
368 */
369 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
370 if (rc)
371 goto err_out_regions;
372 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
373 if (rc)
374 goto err_out_regions;
375
376 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
377 if (probe_ent == NULL) {
378 rc = -ENOMEM;
379 goto err_out_regions;
380 }
381 memset(probe_ent, 0, sizeof(*probe_ent));
382 probe_ent->dev = pci_dev_to_dev(pdev);
383 INIT_LIST_HEAD(&probe_ent->node);
384
385 mmio_base = pci_iomap(pdev, 0, 0);
386 if (mmio_base == NULL) {
387 rc = -ENOMEM;
388 goto err_out_free_ent;
389 }
390 base = (unsigned long) mmio_base;
391
392 /*
393 * Due to a bug in the chip, the default cache line size can't be used
394 */
395 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
396
397 probe_ent->sht = &vsc_sata_sht;
398 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
399 ATA_FLAG_MMIO;
400 probe_ent->port_ops = &vsc_sata_ops;
401 probe_ent->n_ports = 4;
402 probe_ent->irq = pdev->irq;
403 probe_ent->irq_flags = SA_SHIRQ;
404 probe_ent->mmio_base = mmio_base;
405
406 /* We don't care much about the PIO/UDMA masks, but the core won't like us
407 * if we don't fill these
408 */
409 probe_ent->pio_mask = 0x1f;
410 probe_ent->mwdma_mask = 0x07;
411 probe_ent->udma_mask = 0x7f;
412
413 /* We have 4 ports per PCI function */
414 vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
415 vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
416 vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
417 vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
418
419 pci_set_master(pdev);
420
421 /*
422 * Config offset 0x98 is "Extended Control and Status Register 0"
423 * Default value is (1 << 28). All bits except bit 28 are reserved in
424 * DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
425 * If bit 28 is clear, each port has its own LED.
426 */
427 pci_write_config_dword(pdev, 0x98, 0);
428
429 /* FIXME: check ata_device_add return value */
430 ata_device_add(probe_ent);
431 kfree(probe_ent);
432
433 return 0;
434
435err_out_free_ent:
436 kfree(probe_ent);
437err_out_regions:
438 pci_release_regions(pdev);
439err_out:
440 if (!pci_dev_busy)
441 pci_disable_device(pdev);
442 return rc;
443}
444
445
446/*
447 * Intel 31244 is supposed to be identical.
448 * Compatibility is untested as of yet.
449 */
450static const struct pci_device_id vsc_sata_pci_tbl[] = {
451 { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
452 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
453 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244,
454 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
455 { }
456};
457
458
459static struct pci_driver vsc_sata_pci_driver = {
460 .name = DRV_NAME,
461 .id_table = vsc_sata_pci_tbl,
462 .probe = vsc_sata_init_one,
463 .remove = ata_pci_remove_one,
464};
465
466
467static int __init vsc_sata_init(void)
468{
469 return pci_module_init(&vsc_sata_pci_driver);
470}
471
472
473static void __exit vsc_sata_exit(void)
474{
475 pci_unregister_driver(&vsc_sata_pci_driver);
476}
477
478
479MODULE_AUTHOR("Jeremy Higdon");
480MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
481MODULE_LICENSE("GPL");
482MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
483MODULE_VERSION(DRV_VERSION);
484
485module_init(vsc_sata_init);
486module_exit(vsc_sata_exit);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ab7df0dcfe8..7a054f9d1ee3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -96,7 +96,11 @@ unsigned int scsi_logging_level;
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
-const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
+/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
+ * You may not alter any existing entry (although adding new ones is
+ * encouraged once assigned by ANSI/INCITS T10
+ */
+static const char *const scsi_device_types[] = {
 	"Direct-Access    ",
 	"Sequential-Access",
 	"Printer          ",
@@ -107,13 +111,29 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
107 "Optical Device ", 111 "Optical Device ",
108 "Medium Changer ", 112 "Medium Changer ",
109 "Communications ", 113 "Communications ",
110 "Unknown ", 114 "ASC IT8 ",
111 "Unknown ", 115 "ASC IT8 ",
112 "RAID ", 116 "RAID ",
113 "Enclosure ", 117 "Enclosure ",
114 "Direct-Access-RBC", 118 "Direct-Access-RBC",
119 "Optical card ",
120 "Bridge controller",
121 "Object storage ",
122 "Automation/Drive ",
115}; 123};
116EXPORT_SYMBOL(scsi_device_types); 124
125const char * scsi_device_type(unsigned type)
126{
127 if (type == 0x1e)
128 return "Well-known LUN ";
129 if (type == 0x1f)
130 return "No Device ";
131 if (type > ARRAY_SIZE(scsi_device_types))
132 return "Unknown ";
133 return scsi_device_types[type];
134}
135
136EXPORT_SYMBOL(scsi_device_type);
117 137
118struct scsi_host_cmd_pool { 138struct scsi_host_cmd_pool {
119 kmem_cache_t *slab; 139 kmem_cache_t *slab;
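A hedged sketch of how the new helper is meant to be used (the calling scan
code is outside this diff, and inq_result is a hypothetical INQUIRY response
buffer); the peripheral device type is the low five bits of byte 0:

	u8 type = inq_result[0] & 0x1f;

	printk(KERN_INFO "scsi: device type: %s\n", scsi_device_type(type));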
@@ -346,7 +366,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
 		if (level > 3) {
 			printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
 			       " done = 0x%p, queuecommand 0x%p\n",
-				cmd->buffer, cmd->bufflen,
+				cmd->request_buffer, cmd->request_bufflen,
 				cmd->done,
 				sdev->host->hostt->queuecommand);
 
@@ -661,11 +681,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
  */
 int scsi_retry_command(struct scsi_cmnd *cmd)
 {
-	/*
-	 * Restore the SCSI command state.
-	 */
-	scsi_setup_cmd_retry(cmd);
-
 	/*
 	 * Zero the sense information from the last time we tried
 	 * this command.
@@ -711,10 +726,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
711 "Notifying upper driver of completion " 726 "Notifying upper driver of completion "
712 "(result %x)\n", cmd->result)); 727 "(result %x)\n", cmd->result));
713 728
714 /*
715 * We can get here with use_sg=0, causing a panic in the upper level
716 */
717 cmd->use_sg = cmd->old_use_sg;
718 cmd->done(cmd); 729 cmd->done(cmd);
719} 730}
720EXPORT_SYMBOL(scsi_finish_command); 731EXPORT_SYMBOL(scsi_finish_command);
@@ -844,14 +855,14 @@ EXPORT_SYMBOL(scsi_track_queue_full);
  */
 int scsi_device_get(struct scsi_device *sdev)
 {
-	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
+	if (sdev->sdev_state == SDEV_DEL)
 		return -ENXIO;
 	if (!get_device(&sdev->sdev_gendev))
 		return -ENXIO;
-	if (!try_module_get(sdev->host->hostt->module)) {
-		put_device(&sdev->sdev_gendev);
-		return -ENXIO;
-	}
+	/* We can fail this if we're doing SCSI operations
+	 * from module exit (like cache flush) */
+	try_module_get(sdev->host->hostt->module);
+
 	return 0;
 }
 EXPORT_SYMBOL(scsi_device_get);
@@ -866,7 +877,14 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-	module_put(sdev->host->hostt->module);
+	struct module *module = sdev->host->hostt->module;
+
+#ifdef CONFIG_MODULE_UNLOAD
+	/* The module refcount will be zero if scsi_device_get()
+	 * was called from a module removal routine */
+	if (module && module_refcount(module) != 0)
+		module_put(module);
+#endif
 	put_device(&sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_device_put);
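The CONFIG_MODULE_UNLOAD guard above is needed because module_refcount() is
only defined when module unloading is enabled; without unload support the
kernel does not track module reference counts, so there is nothing for
module_put() to drop.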
@@ -1108,6 +1126,8 @@ static int __init init_scsi(void)
 	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
+	scsi_netlink_init();
+
 	printk(KERN_NOTICE "SCSI subsystem initialized\n");
 	return 0;
 
@@ -1128,6 +1148,7 @@ cleanup_queue:
 
 static void __exit exit_scsi(void)
 {
+	scsi_netlink_exit();
 	scsi_sysfs_unregister();
 	scsi_exit_sysctl();
 	scsi_exit_hosts();
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index f51e466893e7..d5a55fae60e0 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -20,8 +20,6 @@
 #ifndef _SCSI_H
 #define _SCSI_H
 
-#include <linux/config.h>	/* for CONFIG_SCSI_LOGGING */
-
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_eh.h>
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5a5d2af8ee43..9c0f35820e3e 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1,5 +1,4 @@
 /*
- * linux/kernel/scsi_debug.c
  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
  * Copyright (C) 1992 Eric Youngdale
  * Simulate a host adapter with 2 disks attached.  Do a lot of checking
@@ -8,7 +7,9 @@
  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  *
  *  This version is more generic, simulating a variable number of disk
- *  (or disk like devices) sharing a common amount of RAM
+ *  (or disk like devices) sharing a common amount of RAM. To be more
+ *  realistic, the simulated devices have the transport attributes of
+ *  SAS disks.
  *
  *
  *  For documentation see http://www.torque.net/sg/sdebug26.html
@@ -24,7 +25,6 @@
  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 
 #include <linux/kernel.h>
@@ -51,18 +51,22 @@
 #include "scsi_logging.h"
 #include "scsi_debug.h"
 
-#define SCSI_DEBUG_VERSION "1.75"
-static const char * scsi_debug_version_date = "20050113";
+#define SCSI_DEBUG_VERSION "1.80"
+static const char * scsi_debug_version_date = "20060914";
 
 /* Additional Sense Code (ASC) used */
-#define NO_ADDED_SENSE 0x0
+#define NO_ADDITIONAL_SENSE 0x0
+#define LOGICAL_UNIT_NOT_READY 0x4
 #define UNRECOVERED_READ_ERR 0x11
+#define PARAMETER_LIST_LENGTH_ERR 0x1a
 #define INVALID_OPCODE 0x20
 #define ADDR_OUT_OF_RANGE 0x21
 #define INVALID_FIELD_IN_CDB 0x24
+#define INVALID_FIELD_IN_PARAM_LIST 0x26
 #define POWERON_RESET 0x29
 #define SAVING_PARAMS_UNSUP 0x39
-#define THRESHHOLD_EXCEEDED 0x5d
+#define THRESHOLD_EXCEEDED 0x5d
+#define LOW_POWER_COND_ON 0x5e
 
 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
 
@@ -81,6 +85,10 @@ static const char * scsi_debug_version_date = "20050113";
 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
 #define DEF_PTYPE   0
 #define DEF_D_SENSE   0
+#define DEF_NO_LUN_0   0
+#define DEF_VIRTUAL_GB   0
+#define DEF_FAKE_RW   0
+#define DEF_VPD_USE_HOSTNO 1
 
 /* bit mask values for scsi_debug_opts */
 #define SCSI_DEBUG_OPT_NOISE   1
@@ -107,6 +115,7 @@ static const char * scsi_debug_version_date = "20050113";
 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
  * or "peripheral device" addressing (value 0) */
 #define SAM2_LUN_ADDRESS_METHOD 0
+#define SAM2_WLUN_REPORT_LUNS 0xc101
 
 static int scsi_debug_add_host = DEF_NUM_HOST;
 static int scsi_debug_delay = DEF_DELAY;
@@ -119,13 +128,18 @@ static int scsi_debug_opts = DEF_OPTS;
 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
 static int scsi_debug_dsense = DEF_D_SENSE;
+static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
+static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
+static int scsi_debug_fake_rw = DEF_FAKE_RW;
+static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 
 static int scsi_debug_cmnd_count = 0;
 
 #define DEV_READONLY(TGT)      (0)
 #define DEV_REMOVEABLE(TGT)    (0)
 
-static unsigned long sdebug_store_size;	/* in bytes */
+static unsigned int sdebug_store_size;	/* in bytes */
+static unsigned int sdebug_store_sectors;
 static sector_t sdebug_capacity;	/* in sectors */
 
 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
@@ -150,7 +164,9 @@ struct sdebug_dev_info {
 	unsigned int target;
 	unsigned int lun;
 	struct sdebug_host_info *sdbg_host;
+	unsigned int wlun;
 	char reset;
+	char stopped;
 	char used;
 };
 
@@ -194,11 +210,11 @@ static struct scsi_host_template sdebug_driver_template = {
 	.bios_param =		scsi_debug_biosparam,
 	.can_queue =		SCSI_DEBUG_CANQUEUE,
 	.this_id =		7,
-	.sg_tablesize =		64,
-	.cmd_per_lun =		3,
-	.max_sectors =		4096,
+	.sg_tablesize =		256,
+	.cmd_per_lun =		16,
+	.max_sectors =		0xffff,
 	.unchecked_isa_dma = 	0,
-	.use_clustering = 	DISABLE_CLUSTERING,
+	.use_clustering = 	ENABLE_CLUSTERING,
 	.module =		THIS_MODULE,
 };
 
@@ -226,19 +242,32 @@ static struct device_driver sdebug_driverfs_driver = {
226static const int check_condition_result = 242static const int check_condition_result =
227 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 243 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
228 244
245static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
246 0, 0, 0x2, 0x4b};
247static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
248 0, 0, 0x0, 0x0};
249
229/* function declarations */ 250/* function declarations */
230static int resp_inquiry(struct scsi_cmnd * SCpnt, int target, 251static int resp_inquiry(struct scsi_cmnd * SCpnt, int target,
231 struct sdebug_dev_info * devip); 252 struct sdebug_dev_info * devip);
232static int resp_requests(struct scsi_cmnd * SCpnt, 253static int resp_requests(struct scsi_cmnd * SCpnt,
233 struct sdebug_dev_info * devip); 254 struct sdebug_dev_info * devip);
255static int resp_start_stop(struct scsi_cmnd * scp,
256 struct sdebug_dev_info * devip);
234static int resp_readcap(struct scsi_cmnd * SCpnt, 257static int resp_readcap(struct scsi_cmnd * SCpnt,
235 struct sdebug_dev_info * devip); 258 struct sdebug_dev_info * devip);
236static int resp_mode_sense(struct scsi_cmnd * SCpnt, int target, 259static int resp_readcap16(struct scsi_cmnd * SCpnt,
260 struct sdebug_dev_info * devip);
261static int resp_mode_sense(struct scsi_cmnd * scp, int target,
237 struct sdebug_dev_info * devip); 262 struct sdebug_dev_info * devip);
238static int resp_read(struct scsi_cmnd * SCpnt, int upper_blk, int block, 263static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
239 int num, struct sdebug_dev_info * devip); 264 struct sdebug_dev_info * devip);
240static int resp_write(struct scsi_cmnd * SCpnt, int upper_blk, int block, 265static int resp_log_sense(struct scsi_cmnd * scp,
241 int num, struct sdebug_dev_info * devip); 266 struct sdebug_dev_info * devip);
267static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba,
268 unsigned int num, struct sdebug_dev_info * devip);
269static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
270 unsigned int num, struct sdebug_dev_info * devip);
242static int resp_report_luns(struct scsi_cmnd * SCpnt, 271static int resp_report_luns(struct scsi_cmnd * SCpnt,
243 struct sdebug_dev_info * devip); 272 struct sdebug_dev_info * devip);
244static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, 273static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
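The two mode page arrays move to file scope because the new resp_mode_select() (below) writes into them. A byte-level annotation of the defaults, per SPC-3 (a reading of the values above, not new code):

	/* ctrl_m_pg: page 0x0a (Control), length 10; byte 2 is 0x02 = GLTSD,
	 * and bit 0x04 there is D_SENSE, which MODE SELECT toggles below;
	 * bytes 10-11 (0x02 0x4b) are the extended self-test completion time.
	 *
	 * iec_m_pg: page 0x1c (Informational Exceptions Control), length 0xa;
	 * byte 2 bit 0x08 is DEXCPT (exceptions disabled) and bit 0x04 is
	 * TEST; the low nibble of byte 3 is MRIE.  resp_requests() and the
	 * IE log page fake a THRESHOLD_EXCEEDED when TEST is set with
	 * MRIE == 6. */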
@@ -249,8 +278,8 @@ static void timer_intr_handler(unsigned long);
249static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev); 278static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev);
250static void mk_sense_buffer(struct sdebug_dev_info * devip, int key, 279static void mk_sense_buffer(struct sdebug_dev_info * devip, int key,
251 int asc, int asq); 280 int asc, int asq);
252static int check_reset(struct scsi_cmnd * SCpnt, 281static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
253 struct sdebug_dev_info * devip); 282 struct sdebug_dev_info * devip);
254static int schedule_resp(struct scsi_cmnd * cmnd, 283static int schedule_resp(struct scsi_cmnd * cmnd,
255 struct sdebug_dev_info * devip, 284 struct sdebug_dev_info * devip,
256 done_funct_t done, int scsi_result, int delta_jiff); 285 done_funct_t done, int scsi_result, int delta_jiff);
@@ -258,9 +287,11 @@ static void __init sdebug_build_parts(unsigned char * ramp);
258static void __init init_all_queued(void); 287static void __init init_all_queued(void);
259static void stop_all_queued(void); 288static void stop_all_queued(void);
260static int stop_queued_cmnd(struct scsi_cmnd * cmnd); 289static int stop_queued_cmnd(struct scsi_cmnd * cmnd);
261static int inquiry_evpd_83(unsigned char * arr, int dev_id_num, 290static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
262 const char * dev_id_str, int dev_id_str_len); 291 int dev_id_num, const char * dev_id_str,
263static void do_create_driverfs_files(void); 292 int dev_id_str_len);
293static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
294static int do_create_driverfs_files(void);
264static void do_remove_driverfs_files(void); 295static void do_remove_driverfs_files(void);
265 296
266static int sdebug_add_adapter(void); 297static int sdebug_add_adapter(void);
@@ -275,18 +306,22 @@ static
275int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) 306int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
276{ 307{
277 unsigned char *cmd = (unsigned char *) SCpnt->cmnd; 308 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
278 int block, upper_blk, num, k; 309 int len, k, j;
310 unsigned int num;
311 unsigned long long lba;
279 int errsts = 0; 312 int errsts = 0;
280 int target = scmd_id(SCpnt); 313 int target = SCpnt->device->id;
281 struct sdebug_dev_info * devip = NULL; 314 struct sdebug_dev_info * devip = NULL;
282 int inj_recovered = 0; 315 int inj_recovered = 0;
316 int delay_override = 0;
283 317
284 if (done == NULL) 318 if (done == NULL)
285 return 0; /* assume mid level reprocessing command */ 319 return 0; /* assume mid level reprocessing command */
286 320
321 SCpnt->resid = 0;
287 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { 322 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
288 printk(KERN_INFO "scsi_debug: cmd "); 323 printk(KERN_INFO "scsi_debug: cmd ");
289 for (k = 0, num = SCpnt->cmd_len; k < num; ++k) 324 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
290 printk("%02x ", (int)cmd[k]); 325 printk("%02x ", (int)cmd[k]);
291 printk("\n"); 326 printk("\n");
292 } 327 }
@@ -297,7 +332,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
297 DID_NO_CONNECT << 16, 0); 332 DID_NO_CONNECT << 16, 0);
298 } 333 }
299 334
300 if (SCpnt->device->lun >= scsi_debug_max_luns) 335 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
336 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
301 return schedule_resp(SCpnt, NULL, done, 337 return schedule_resp(SCpnt, NULL, done,
302 DID_NO_CONNECT << 16, 0); 338 DID_NO_CONNECT << 16, 0);
303 devip = devInfoReg(SCpnt->device); 339 devip = devInfoReg(SCpnt->device);
@@ -316,118 +352,154 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
316 inj_recovered = 1; /* to reads and writes below */ 352 inj_recovered = 1; /* to reads and writes below */
317 } 353 }
318 354
355 if (devip->wlun) {
356 switch (*cmd) {
357 case INQUIRY:
358 case REQUEST_SENSE:
359 case TEST_UNIT_READY:
360 case REPORT_LUNS:
361 break; /* only allowable wlun commands */
362 default:
363 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
364 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
365 "not supported for wlun\n", *cmd);
366 mk_sense_buffer(devip, ILLEGAL_REQUEST,
367 INVALID_OPCODE, 0);
368 errsts = check_condition_result;
369 return schedule_resp(SCpnt, devip, done, errsts,
370 0);
371 }
372 }
373
319 switch (*cmd) { 374 switch (*cmd) {
320 case INQUIRY: /* mandatory, ignore unit attention */ 375 case INQUIRY: /* mandatory, ignore unit attention */
376 delay_override = 1;
321 errsts = resp_inquiry(SCpnt, target, devip); 377 errsts = resp_inquiry(SCpnt, target, devip);
322 break; 378 break;
323 case REQUEST_SENSE: /* mandatory, ignore unit attention */ 379 case REQUEST_SENSE: /* mandatory, ignore unit attention */
380 delay_override = 1;
324 errsts = resp_requests(SCpnt, devip); 381 errsts = resp_requests(SCpnt, devip);
325 break; 382 break;
326 case REZERO_UNIT: /* actually this is REWIND for SSC */ 383 case REZERO_UNIT: /* actually this is REWIND for SSC */
327 case START_STOP: 384 case START_STOP:
328 errsts = check_reset(SCpnt, devip); 385 errsts = resp_start_stop(SCpnt, devip);
329 break; 386 break;
330 case ALLOW_MEDIUM_REMOVAL: 387 case ALLOW_MEDIUM_REMOVAL:
331 if ((errsts = check_reset(SCpnt, devip))) 388 if ((errsts = check_readiness(SCpnt, 1, devip)))
332 break; 389 break;
333 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 390 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
334 printk(KERN_INFO "scsi_debug: Medium removal %s\n", 391 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
335 cmd[4] ? "inhibited" : "enabled"); 392 cmd[4] ? "inhibited" : "enabled");
336 break; 393 break;
337 case SEND_DIAGNOSTIC: /* mandatory */ 394 case SEND_DIAGNOSTIC: /* mandatory */
338 errsts = check_reset(SCpnt, devip); 395 errsts = check_readiness(SCpnt, 1, devip);
339 break; 396 break;
340 case TEST_UNIT_READY: /* mandatory */ 397 case TEST_UNIT_READY: /* mandatory */
341 errsts = check_reset(SCpnt, devip); 398 delay_override = 1;
399 errsts = check_readiness(SCpnt, 0, devip);
342 break; 400 break;
343 case RESERVE: 401 case RESERVE:
344 errsts = check_reset(SCpnt, devip); 402 errsts = check_readiness(SCpnt, 1, devip);
345 break; 403 break;
346 case RESERVE_10: 404 case RESERVE_10:
347 errsts = check_reset(SCpnt, devip); 405 errsts = check_readiness(SCpnt, 1, devip);
348 break; 406 break;
349 case RELEASE: 407 case RELEASE:
350 errsts = check_reset(SCpnt, devip); 408 errsts = check_readiness(SCpnt, 1, devip);
351 break; 409 break;
352 case RELEASE_10: 410 case RELEASE_10:
353 errsts = check_reset(SCpnt, devip); 411 errsts = check_readiness(SCpnt, 1, devip);
354 break; 412 break;
355 case READ_CAPACITY: 413 case READ_CAPACITY:
356 errsts = resp_readcap(SCpnt, devip); 414 errsts = resp_readcap(SCpnt, devip);
357 break; 415 break;
416 case SERVICE_ACTION_IN:
417 if (SAI_READ_CAPACITY_16 != cmd[1]) {
418 mk_sense_buffer(devip, ILLEGAL_REQUEST,
419 INVALID_OPCODE, 0);
420 errsts = check_condition_result;
421 break;
422 }
423 errsts = resp_readcap16(SCpnt, devip);
424 break;
358 case READ_16: 425 case READ_16:
359 case READ_12: 426 case READ_12:
360 case READ_10: 427 case READ_10:
361 case READ_6: 428 case READ_6:
362 if ((errsts = check_reset(SCpnt, devip))) 429 if ((errsts = check_readiness(SCpnt, 0, devip)))
430 break;
431 if (scsi_debug_fake_rw)
363 break; 432 break;
364 upper_blk = 0;
365 if ((*cmd) == READ_16) { 433 if ((*cmd) == READ_16) {
366 upper_blk = cmd[5] + (cmd[4] << 8) + 434 for (lba = 0, j = 0; j < 8; ++j) {
367 (cmd[3] << 16) + (cmd[2] << 24); 435 if (j > 0)
368 block = cmd[9] + (cmd[8] << 8) + 436 lba <<= 8;
369 (cmd[7] << 16) + (cmd[6] << 24); 437 lba += cmd[2 + j];
438 }
370 num = cmd[13] + (cmd[12] << 8) + 439 num = cmd[13] + (cmd[12] << 8) +
371 (cmd[11] << 16) + (cmd[10] << 24); 440 (cmd[11] << 16) + (cmd[10] << 24);
372 } else if ((*cmd) == READ_12) { 441 } else if ((*cmd) == READ_12) {
373 block = cmd[5] + (cmd[4] << 8) + 442 lba = cmd[5] + (cmd[4] << 8) +
374 (cmd[3] << 16) + (cmd[2] << 24); 443 (cmd[3] << 16) + (cmd[2] << 24);
375 num = cmd[9] + (cmd[8] << 8) + 444 num = cmd[9] + (cmd[8] << 8) +
376 (cmd[7] << 16) + (cmd[6] << 24); 445 (cmd[7] << 16) + (cmd[6] << 24);
377 } else if ((*cmd) == READ_10) { 446 } else if ((*cmd) == READ_10) {
378 block = cmd[5] + (cmd[4] << 8) + 447 lba = cmd[5] + (cmd[4] << 8) +
379 (cmd[3] << 16) + (cmd[2] << 24); 448 (cmd[3] << 16) + (cmd[2] << 24);
380 num = cmd[8] + (cmd[7] << 8); 449 num = cmd[8] + (cmd[7] << 8);
381 } else { 450 } else { /* READ (6) */
382 block = cmd[3] + (cmd[2] << 8) + 451 lba = cmd[3] + (cmd[2] << 8) +
383 ((cmd[1] & 0x1f) << 16); 452 ((cmd[1] & 0x1f) << 16);
384 num = cmd[4]; 453 num = (0 == cmd[4]) ? 256 : cmd[4];
385 } 454 }
386 errsts = resp_read(SCpnt, upper_blk, block, num, devip); 455 errsts = resp_read(SCpnt, lba, num, devip);
387 if (inj_recovered && (0 == errsts)) { 456 if (inj_recovered && (0 == errsts)) {
388 mk_sense_buffer(devip, RECOVERED_ERROR, 457 mk_sense_buffer(devip, RECOVERED_ERROR,
389 THRESHHOLD_EXCEEDED, 0); 458 THRESHOLD_EXCEEDED, 0);
390 errsts = check_condition_result; 459 errsts = check_condition_result;
391 } 460 }
392 break; 461 break;
393 case REPORT_LUNS: /* mandatory, ignore unit attention */ 462 case REPORT_LUNS: /* mandatory, ignore unit attention */
463 delay_override = 1;
394 errsts = resp_report_luns(SCpnt, devip); 464 errsts = resp_report_luns(SCpnt, devip);
395 break; 465 break;
396 case VERIFY: /* 10 byte SBC-2 command */ 466 case VERIFY: /* 10 byte SBC-2 command */
397 errsts = check_reset(SCpnt, devip); 467 errsts = check_readiness(SCpnt, 0, devip);
398 break; 468 break;
399 case WRITE_16: 469 case WRITE_16:
400 case WRITE_12: 470 case WRITE_12:
401 case WRITE_10: 471 case WRITE_10:
402 case WRITE_6: 472 case WRITE_6:
403 if ((errsts = check_reset(SCpnt, devip))) 473 if ((errsts = check_readiness(SCpnt, 0, devip)))
474 break;
475 if (scsi_debug_fake_rw)
404 break; 476 break;
405 upper_blk = 0;
406 if ((*cmd) == WRITE_16) { 477 if ((*cmd) == WRITE_16) {
407 upper_blk = cmd[5] + (cmd[4] << 8) + 478 for (lba = 0, j = 0; j < 8; ++j) {
408 (cmd[3] << 16) + (cmd[2] << 24); 479 if (j > 0)
409 block = cmd[9] + (cmd[8] << 8) + 480 lba <<= 8;
410 (cmd[7] << 16) + (cmd[6] << 24); 481 lba += cmd[2 + j];
482 }
411 num = cmd[13] + (cmd[12] << 8) + 483 num = cmd[13] + (cmd[12] << 8) +
412 (cmd[11] << 16) + (cmd[10] << 24); 484 (cmd[11] << 16) + (cmd[10] << 24);
413 } else if ((*cmd) == WRITE_12) { 485 } else if ((*cmd) == WRITE_12) {
414 block = cmd[5] + (cmd[4] << 8) + 486 lba = cmd[5] + (cmd[4] << 8) +
415 (cmd[3] << 16) + (cmd[2] << 24); 487 (cmd[3] << 16) + (cmd[2] << 24);
416 num = cmd[9] + (cmd[8] << 8) + 488 num = cmd[9] + (cmd[8] << 8) +
417 (cmd[7] << 16) + (cmd[6] << 24); 489 (cmd[7] << 16) + (cmd[6] << 24);
418 } else if ((*cmd) == WRITE_10) { 490 } else if ((*cmd) == WRITE_10) {
419 block = cmd[5] + (cmd[4] << 8) + 491 lba = cmd[5] + (cmd[4] << 8) +
420 (cmd[3] << 16) + (cmd[2] << 24); 492 (cmd[3] << 16) + (cmd[2] << 24);
421 num = cmd[8] + (cmd[7] << 8); 493 num = cmd[8] + (cmd[7] << 8);
422 } else { 494 } else { /* WRITE (6) */
423 block = cmd[3] + (cmd[2] << 8) + 495 lba = cmd[3] + (cmd[2] << 8) +
424 ((cmd[1] & 0x1f) << 16); 496 ((cmd[1] & 0x1f) << 16);
425 num = cmd[4]; 497 num = (0 == cmd[4]) ? 256 : cmd[4];
426 } 498 }
427 errsts = resp_write(SCpnt, upper_blk, block, num, devip); 499 errsts = resp_write(SCpnt, lba, num, devip);
428 if (inj_recovered && (0 == errsts)) { 500 if (inj_recovered && (0 == errsts)) {
429 mk_sense_buffer(devip, RECOVERED_ERROR, 501 mk_sense_buffer(devip, RECOVERED_ERROR,
430 THRESHHOLD_EXCEEDED, 0); 502 THRESHOLD_EXCEEDED, 0);
431 errsts = check_condition_result; 503 errsts = check_condition_result;
432 } 504 }
433 break; 505 break;
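The READ(16)/WRITE(16) byte loops above are plain big-endian 64-bit decodes; the same operation as a stand-alone helper (hypothetical name, shown only as a sketch — later kernels would reach for get_unaligned_be64() instead):

	static unsigned long long cdb_get_be64(const unsigned char *p)
	{
		unsigned long long v = 0;
		int k;

		for (k = 0; k < 8; ++k)
			v = (v << 8) | p[k];
		return v;	/* e.g. lba = cdb_get_be64(cmd + 2) */
	}

Note also the 6-byte CDB quirk now honoured: a transfer length of 0 in READ(6)/WRITE(6) means 256 blocks.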
@@ -435,20 +507,31 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
435 case MODE_SENSE_10: 507 case MODE_SENSE_10:
436 errsts = resp_mode_sense(SCpnt, target, devip); 508 errsts = resp_mode_sense(SCpnt, target, devip);
437 break; 509 break;
510 case MODE_SELECT:
511 errsts = resp_mode_select(SCpnt, 1, devip);
512 break;
513 case MODE_SELECT_10:
514 errsts = resp_mode_select(SCpnt, 0, devip);
515 break;
516 case LOG_SENSE:
517 errsts = resp_log_sense(SCpnt, devip);
518 break;
438 case SYNCHRONIZE_CACHE: 519 case SYNCHRONIZE_CACHE:
439 errsts = check_reset(SCpnt, devip); 520 delay_override = 1;
521 errsts = check_readiness(SCpnt, 0, devip);
440 break; 522 break;
441 default: 523 default:
442 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 524 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
443 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " 525 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
444 "supported\n", *cmd); 526 "supported\n", *cmd);
445 if ((errsts = check_reset(SCpnt, devip))) 527 if ((errsts = check_readiness(SCpnt, 1, devip)))
446 break; /* Unit attention takes precedence */ 528 break; /* Unit attention takes precedence */
447 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); 529 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
448 errsts = check_condition_result; 530 errsts = check_condition_result;
449 break; 531 break;
450 } 532 }
451 return schedule_resp(SCpnt, devip, done, errsts, scsi_debug_delay); 533 return schedule_resp(SCpnt, devip, done, errsts,
534 (delay_override ? 0 : scsi_debug_delay));
452} 535}
453 536
454static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 537static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
@@ -460,7 +543,8 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
460 /* return -ENOTTY; // correct return but upsets fdisk */ 543 /* return -ENOTTY; // correct return but upsets fdisk */
461} 544}
462 545
463static int check_reset(struct scsi_cmnd * SCpnt, struct sdebug_dev_info * devip) 546static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
547 struct sdebug_dev_info * devip)
464{ 548{
465 if (devip->reset) { 549 if (devip->reset) {
466 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 550 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
@@ -470,6 +554,14 @@ static int check_reset(struct scsi_cmnd * SCpnt, struct sdebug_dev_info * devip)
470 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0); 554 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
471 return check_condition_result; 555 return check_condition_result;
472 } 556 }
557 if ((0 == reset_only) && devip->stopped) {
558 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
559 printk(KERN_INFO "scsi_debug: Reporting Not "
560 "ready: initializing command required\n");
561 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
562 0x2);
563 return check_condition_result;
564 }
473 return 0; 565 return 0;
474} 566}
475 567
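With the stopped flag, check_readiness() can now fail a command for a second reason. The fixed-format sense that mk_sense_buffer() builds for the new branch decodes, per SPC-3/SBC-2, as:

	/* key  = NOT_READY               (0x02)
	 * asc  = LOGICAL_UNIT_NOT_READY  (0x04)
	 * ascq = 0x02  "initializing command required", i.e. send START UNIT */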
@@ -493,7 +585,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
493 req_len = scp->request_bufflen; 585 req_len = scp->request_bufflen;
494 act_len = (req_len < arr_len) ? req_len : arr_len; 586 act_len = (req_len < arr_len) ? req_len : arr_len;
495 memcpy(scp->request_buffer, arr, act_len); 587 memcpy(scp->request_buffer, arr, act_len);
496 scp->resid = req_len - act_len; 588 if (scp->resid)
589 scp->resid -= act_len;
590 else
591 scp->resid = req_len - act_len;
497 return 0; 592 return 0;
498 } 593 }
499 sgpnt = (struct scatterlist *)scp->request_buffer; 594 sgpnt = (struct scatterlist *)scp->request_buffer;
@@ -516,7 +611,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
516 } 611 }
517 req_len += sgpnt->length; 612 req_len += sgpnt->length;
518 } 613 }
519 scp->resid = req_len - act_len; 614 if (scp->resid)
615 scp->resid -= act_len;
616 else
617 scp->resid = req_len - act_len;
520 return 0; 618 return 0;
521} 619}
522 620
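The resid change matters because one command can now be satisfied by two fill_from_dev_buffer() calls (the wrap-around reads added further down). A worked example: for an 8192-byte request answered by two successive 2048-byte copies,

	/* 1st call: resid == 0, so resid = 8192 - 2048 = 6144
	 * 2nd call: resid != 0, so resid -= 2048   ->   4096 */

whereas recomputing req_len - act_len on the second call would wrongly report 6144 bytes outstanding.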
@@ -567,12 +665,14 @@ static const char * inq_vendor_id = "Linux ";
567static const char * inq_product_id = "scsi_debug "; 665static const char * inq_product_id = "scsi_debug ";
568static const char * inq_product_rev = "0004"; 666static const char * inq_product_rev = "0004";
569 667
570static int inquiry_evpd_83(unsigned char * arr, int dev_id_num, 668static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
571 const char * dev_id_str, int dev_id_str_len) 669 int dev_id_num, const char * dev_id_str,
670 int dev_id_str_len)
572{ 671{
573 int num; 672 int num, port_a;
673 char b[32];
574 674
575 /* Two identification descriptors: */ 675 port_a = target_dev_id + 1;
576 /* T10 vendor identifier field format (faked) */ 676 /* T10 vendor identifier field format (faked) */
577 arr[0] = 0x2; /* ASCII */ 677 arr[0] = 0x2; /* ASCII */
578 arr[1] = 0x1; 678 arr[1] = 0x1;
@@ -583,25 +683,246 @@ static int inquiry_evpd_83(unsigned char * arr, int dev_id_num,
583 num = 8 + 16 + dev_id_str_len; 683 num = 8 + 16 + dev_id_str_len;
584 arr[3] = num; 684 arr[3] = num;
585 num += 4; 685 num += 4;
586 /* NAA IEEE registered identifier (faked) */ 686 if (dev_id_num >= 0) {
587 arr[num] = 0x1; /* binary */ 687 /* NAA-5, Logical unit identifier (binary) */
588 arr[num + 1] = 0x3; 688 arr[num++] = 0x1; /* binary (not necessarily sas) */
589 arr[num + 2] = 0x0; 689 arr[num++] = 0x3; /* PIV=0, lu, naa */
590 arr[num + 3] = 0x8; 690 arr[num++] = 0x0;
591 arr[num + 4] = 0x51; /* ieee company id=0x123456 (faked) */ 691 arr[num++] = 0x8;
592 arr[num + 5] = 0x23; 692 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
593 arr[num + 6] = 0x45; 693 arr[num++] = 0x33;
594 arr[num + 7] = 0x60; 694 arr[num++] = 0x33;
595 arr[num + 8] = (dev_id_num >> 24); 695 arr[num++] = 0x30;
596 arr[num + 9] = (dev_id_num >> 16) & 0xff; 696 arr[num++] = (dev_id_num >> 24);
597 arr[num + 10] = (dev_id_num >> 8) & 0xff; 697 arr[num++] = (dev_id_num >> 16) & 0xff;
598 arr[num + 11] = dev_id_num & 0xff; 698 arr[num++] = (dev_id_num >> 8) & 0xff;
599 return num + 12; 699 arr[num++] = dev_id_num & 0xff;
700 /* Target relative port number */
701 arr[num++] = 0x61; /* proto=sas, binary */
702 arr[num++] = 0x94; /* PIV=1, target port, rel port */
703 arr[num++] = 0x0; /* reserved */
704 arr[num++] = 0x4; /* length */
705 arr[num++] = 0x0; /* reserved */
706 arr[num++] = 0x0; /* reserved */
707 arr[num++] = 0x0;
708 arr[num++] = 0x1; /* relative port A */
709 }
710 /* NAA-5, Target port identifier */
711 arr[num++] = 0x61; /* proto=sas, binary */
712 arr[num++] = 0x93; /* piv=1, target port, naa */
713 arr[num++] = 0x0;
714 arr[num++] = 0x8;
715 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
716 arr[num++] = 0x22;
717 arr[num++] = 0x22;
718 arr[num++] = 0x20;
719 arr[num++] = (port_a >> 24);
720 arr[num++] = (port_a >> 16) & 0xff;
721 arr[num++] = (port_a >> 8) & 0xff;
722 arr[num++] = port_a & 0xff;
723 /* NAA-5, Target device identifier */
724 arr[num++] = 0x61; /* proto=sas, binary */
725 arr[num++] = 0xa3; /* piv=1, target device, naa */
726 arr[num++] = 0x0;
727 arr[num++] = 0x8;
728 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
729 arr[num++] = 0x22;
730 arr[num++] = 0x22;
731 arr[num++] = 0x20;
732 arr[num++] = (target_dev_id >> 24);
733 arr[num++] = (target_dev_id >> 16) & 0xff;
734 arr[num++] = (target_dev_id >> 8) & 0xff;
735 arr[num++] = target_dev_id & 0xff;
736 /* SCSI name string: Target device identifier */
737 arr[num++] = 0x63; /* proto=sas, UTF-8 */
738 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
739 arr[num++] = 0x0;
740 arr[num++] = 24;
741 memcpy(arr + num, "naa.52222220", 12);
742 num += 12;
743 snprintf(b, sizeof(b), "%08X", target_dev_id);
744 memcpy(arr + num, b, 8);
745 num += 8;
746 memset(arr + num, 0, 4);
747 num += 4;
748 return num;
749}
750
751
752static unsigned char vpd84_data[] = {
753/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
754 0x22,0x22,0x22,0x0,0xbb,0x1,
755 0x22,0x22,0x22,0x0,0xbb,0x2,
756};
757
758static int inquiry_evpd_84(unsigned char * arr)
759{
760 memcpy(arr, vpd84_data, sizeof(vpd84_data));
761 return sizeof(vpd84_data);
762}
763
764static int inquiry_evpd_85(unsigned char * arr)
765{
766 int num = 0;
767 const char * na1 = "https://www.kernel.org/config";
768 const char * na2 = "http://www.kernel.org/log";
769 int plen, olen;
770
771 arr[num++] = 0x1; /* lu, storage config */
772 arr[num++] = 0x0; /* reserved */
773 arr[num++] = 0x0;
774 olen = strlen(na1);
775 plen = olen + 1;
776 if (plen % 4)
777 plen = ((plen / 4) + 1) * 4;
778 arr[num++] = plen; /* length, null terminated, padded */
779 memcpy(arr + num, na1, olen);
780 memset(arr + num + olen, 0, plen - olen);
781 num += plen;
782
783 arr[num++] = 0x4; /* lu, logging */
784 arr[num++] = 0x0; /* reserved */
785 arr[num++] = 0x0;
786 olen = strlen(na2);
787 plen = olen + 1;
788 if (plen % 4)
789 plen = ((plen / 4) + 1) * 4;
790 arr[num++] = plen; /* length, null terminated, padded */
791 memcpy(arr + num, na2, olen);
792 memset(arr + num + olen, 0, plen - olen);
793 num += plen;
794
795 return num;
796}
797
798/* SCSI ports VPD page */
799static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
800{
801 int num = 0;
802 int port_a, port_b;
803
804 port_a = target_dev_id + 1;
805 port_b = port_a + 1;
806 arr[num++] = 0x0; /* reserved */
807 arr[num++] = 0x0; /* reserved */
808 arr[num++] = 0x0;
809 arr[num++] = 0x1; /* relative port 1 (primary) */
810 memset(arr + num, 0, 6);
811 num += 6;
812 arr[num++] = 0x0;
813 arr[num++] = 12; /* length tp descriptor */
814 /* naa-5 target port identifier (A) */
815 arr[num++] = 0x61; /* proto=sas, binary */
816 arr[num++] = 0x93; /* PIV=1, target port, NAA */
817 arr[num++] = 0x0; /* reserved */
818 arr[num++] = 0x8; /* length */
819 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
820 arr[num++] = 0x22;
821 arr[num++] = 0x22;
822 arr[num++] = 0x20;
823 arr[num++] = (port_a >> 24);
824 arr[num++] = (port_a >> 16) & 0xff;
825 arr[num++] = (port_a >> 8) & 0xff;
826 arr[num++] = port_a & 0xff;
827
828 arr[num++] = 0x0; /* reserved */
829 arr[num++] = 0x0; /* reserved */
830 arr[num++] = 0x0;
831 arr[num++] = 0x2; /* relative port 2 (secondary) */
832 memset(arr + num, 0, 6);
833 num += 6;
834 arr[num++] = 0x0;
835 arr[num++] = 12; /* length tp descriptor */
836 /* naa-5 target port identifier (B) */
837 arr[num++] = 0x61; /* proto=sas, binary */
838 arr[num++] = 0x93; /* PIV=1, target port, NAA */
839 arr[num++] = 0x0; /* reserved */
840 arr[num++] = 0x8; /* length */
841 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
842 arr[num++] = 0x22;
843 arr[num++] = 0x22;
844 arr[num++] = 0x20;
845 arr[num++] = (port_b >> 24);
846 arr[num++] = (port_b >> 16) & 0xff;
847 arr[num++] = (port_b >> 8) & 0xff;
848 arr[num++] = port_b & 0xff;
849
850 return num;
851}
852
853
854static unsigned char vpd89_data[] = {
855/* from 4th byte */ 0,0,0,0,
856'l','i','n','u','x',' ',' ',' ',
857'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
858'1','2','3','4',
8590x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
8600xec,0,0,0,
8610x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
8620,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
8630x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
8640x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
8650x53,0x41,
8660x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
8670x20,0x20,
8680x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
8690x10,0x80,
8700,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
8710x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
8720x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
8730,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
8740x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
8750x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
8760,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
8770,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8780,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8790,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8800x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
8810,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
8820xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
8830,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
8840,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8850,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8870,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8880,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8890,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8900,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8910,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8920,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8930,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8940,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8950,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
896};
897
898static int inquiry_evpd_89(unsigned char * arr)
899{
900 memcpy(arr, vpd89_data, sizeof(vpd89_data));
901 return sizeof(vpd89_data);
902}
903
904
905static unsigned char vpdb0_data[] = {
906 /* from 4th byte */ 0,0,0,4,
907 0,0,0x4,0,
908 0,0,0,64,
909};
910
911static int inquiry_evpd_b0(unsigned char * arr)
912{
913 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
914 if (sdebug_store_sectors > 0x400) {
915 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
916 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
917 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
918 arr[7] = sdebug_store_sectors & 0xff;
919 }
920 return sizeof(vpdb0_data);
600} 921}
601 922
602 923
603#define SDEBUG_LONG_INQ_SZ 96 924#define SDEBUG_LONG_INQ_SZ 96
604#define SDEBUG_MAX_INQ_ARR_SZ 128 925#define SDEBUG_MAX_INQ_ARR_SZ 584
605 926
606static int resp_inquiry(struct scsi_cmnd * scp, int target, 927static int resp_inquiry(struct scsi_cmnd * scp, int target,
607 struct sdebug_dev_info * devip) 928 struct sdebug_dev_info * devip)
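All the NAA-5 descriptors built in inquiry_evpd_83() and inquiry_evpd_88() above share one 8-byte layout: NAA nibble 5, a 24-bit IEEE company id, then a 36-bit vendor-specific id. A hypothetical packing helper (not in the patch) that reproduces the byte sequences:

	static unsigned long long make_naa5(unsigned int company_id,
					    unsigned long long vendor_id)
	{
		return (0x5ULL << 60) |
		       ((unsigned long long)(company_id & 0xffffff) << 36) |
		       (vendor_id & 0xfffffffffULL);
	}

make_naa5(0x222222, port_a), written out big-endian, gives exactly the 0x52 0x22 0x22 0x20 ... target port bytes above.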
@@ -609,64 +930,115 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
609 unsigned char pq_pdt; 930 unsigned char pq_pdt;
610 unsigned char arr[SDEBUG_MAX_INQ_ARR_SZ]; 931 unsigned char arr[SDEBUG_MAX_INQ_ARR_SZ];
611 unsigned char *cmd = (unsigned char *)scp->cmnd; 932 unsigned char *cmd = (unsigned char *)scp->cmnd;
612 int alloc_len; 933 int alloc_len, n;
613 934
614 alloc_len = (cmd[3] << 8) + cmd[4]; 935 alloc_len = (cmd[3] << 8) + cmd[4];
615 memset(arr, 0, SDEBUG_MAX_INQ_ARR_SZ); 936 memset(arr, 0, SDEBUG_MAX_INQ_ARR_SZ);
616 pq_pdt = (scsi_debug_ptype & 0x1f); 937 if (devip->wlun)
938 pq_pdt = 0x1e; /* present, wlun */
939 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
940 pq_pdt = 0x7f; /* not present, no device type */
941 else
942 pq_pdt = (scsi_debug_ptype & 0x1f);
617 arr[0] = pq_pdt; 943 arr[0] = pq_pdt;
618 if (0x2 & cmd[1]) { /* CMDDT bit set */ 944 if (0x2 & cmd[1]) { /* CMDDT bit set */
619 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 945 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
620 0); 946 0);
621 return check_condition_result; 947 return check_condition_result;
622 } else if (0x1 & cmd[1]) { /* EVPD bit set */ 948 } else if (0x1 & cmd[1]) { /* EVPD bit set */
623 int dev_id_num, len; 949 int lu_id_num, target_dev_id, len;
624 char dev_id_str[6]; 950 char lu_id_str[6];
951 int host_no = devip->sdbg_host->shost->host_no;
625 952
626 dev_id_num = ((devip->sdbg_host->shost->host_no + 1) * 2000) + 953 if (0 == scsi_debug_vpd_use_hostno)
627 (devip->target * 1000) + devip->lun; 954 host_no = 0;
628 len = scnprintf(dev_id_str, 6, "%d", dev_id_num); 955 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
956 (devip->target * 1000) + devip->lun);
957 target_dev_id = ((host_no + 1) * 2000) +
958 (devip->target * 1000) - 3;
959 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
629 if (0 == cmd[2]) { /* supported vital product data pages */ 960 if (0 == cmd[2]) { /* supported vital product data pages */
630 arr[3] = 3; 961 arr[1] = cmd[2]; /*sanity */
631 arr[4] = 0x0; /* this page */ 962 n = 4;
632 arr[5] = 0x80; /* unit serial number */ 963 arr[n++] = 0x0; /* this page */
633 arr[6] = 0x83; /* device identification */ 964 arr[n++] = 0x80; /* unit serial number */
965 arr[n++] = 0x83; /* device identification */
966 arr[n++] = 0x84; /* software interface ident. */
967 arr[n++] = 0x85; /* management network addresses */
968 arr[n++] = 0x86; /* extended inquiry */
969 arr[n++] = 0x87; /* mode page policy */
970 arr[n++] = 0x88; /* SCSI ports */
971 arr[n++] = 0x89; /* ATA information */
972 arr[n++] = 0xb0; /* Block limits (SBC) */
973 arr[3] = n - 4; /* number of supported VPD pages */
634 } else if (0x80 == cmd[2]) { /* unit serial number */ 974 } else if (0x80 == cmd[2]) { /* unit serial number */
635 arr[1] = 0x80; 975 arr[1] = cmd[2]; /*sanity */
636 arr[3] = len; 976 arr[3] = len;
637 memcpy(&arr[4], dev_id_str, len); 977 memcpy(&arr[4], lu_id_str, len);
638 } else if (0x83 == cmd[2]) { /* device identification */ 978 } else if (0x83 == cmd[2]) { /* device identification */
639 arr[1] = 0x83; 979 arr[1] = cmd[2]; /*sanity */
640 arr[3] = inquiry_evpd_83(&arr[4], dev_id_num, 980 arr[3] = inquiry_evpd_83(&arr[4], target_dev_id,
641 dev_id_str, len); 981 lu_id_num, lu_id_str, len);
982 } else if (0x84 == cmd[2]) { /* Software interface ident. */
983 arr[1] = cmd[2]; /*sanity */
984 arr[3] = inquiry_evpd_84(&arr[4]);
985 } else if (0x85 == cmd[2]) { /* Management network addresses */
986 arr[1] = cmd[2]; /*sanity */
987 arr[3] = inquiry_evpd_85(&arr[4]);
988 } else if (0x86 == cmd[2]) { /* extended inquiry */
989 arr[1] = cmd[2]; /*sanity */
990 arr[3] = 0x3c; /* number of following entries */
991 arr[4] = 0x0; /* no protection stuff */
992 arr[5] = 0x7; /* head of q, ordered + simple q's */
993 } else if (0x87 == cmd[2]) { /* mode page policy */
994 arr[1] = cmd[2]; /*sanity */
995 arr[3] = 0x8; /* number of following entries */
996 arr[4] = 0x2; /* disconnect-reconnect mp */
997 arr[6] = 0x80; /* mlus, shared */
998 arr[8] = 0x18; /* protocol specific lu */
999 arr[10] = 0x82; /* mlus, per initiator port */
1000 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1001 arr[1] = cmd[2]; /*sanity */
1002 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1003 } else if (0x89 == cmd[2]) { /* ATA information */
1004 arr[1] = cmd[2]; /*sanity */
1005 n = inquiry_evpd_89(&arr[4]);
1006 arr[2] = (n >> 8);
1007 arr[3] = (n & 0xff);
1008 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1009 arr[1] = cmd[2]; /*sanity */
1010 arr[3] = inquiry_evpd_b0(&arr[4]);
642 } else { 1011 } else {
643 /* Illegal request, invalid field in cdb */ 1012 /* Illegal request, invalid field in cdb */
644 mk_sense_buffer(devip, ILLEGAL_REQUEST, 1013 mk_sense_buffer(devip, ILLEGAL_REQUEST,
645 INVALID_FIELD_IN_CDB, 0); 1014 INVALID_FIELD_IN_CDB, 0);
646 return check_condition_result; 1015 return check_condition_result;
647 } 1016 }
1017 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
648 return fill_from_dev_buffer(scp, arr, 1018 return fill_from_dev_buffer(scp, arr,
649 min(alloc_len, SDEBUG_MAX_INQ_ARR_SZ)); 1019 min(len, SDEBUG_MAX_INQ_ARR_SZ));
650 } 1020 }
651 /* drops through here for a standard inquiry */ 1021 /* drops through here for a standard inquiry */
652 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */ 1022 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
653 arr[2] = scsi_debug_scsi_level; 1023 arr[2] = scsi_debug_scsi_level;
654 arr[3] = 2; /* response_data_format==2 */ 1024 arr[3] = 2; /* response_data_format==2 */
655 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1025 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
656 arr[6] = 0x1; /* claim: ADDR16 */ 1026 arr[6] = 0x10; /* claim: MultiP */
657 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ 1027 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
658 arr[7] = 0x3a; /* claim: WBUS16, SYNC, LINKED + CMDQUE */ 1028 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
659 memcpy(&arr[8], inq_vendor_id, 8); 1029 memcpy(&arr[8], inq_vendor_id, 8);
660 memcpy(&arr[16], inq_product_id, 16); 1030 memcpy(&arr[16], inq_product_id, 16);
661 memcpy(&arr[32], inq_product_rev, 4); 1031 memcpy(&arr[32], inq_product_rev, 4);
662 /* version descriptors (2 bytes each) follow */ 1032 /* version descriptors (2 bytes each) follow */
663 arr[58] = 0x0; arr[59] = 0x40; /* SAM-2 */ 1033 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
664 arr[60] = 0x3; arr[61] = 0x0; /* SPC-3 */ 1034 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
1035 n = 62;
665 if (scsi_debug_ptype == 0) { 1036 if (scsi_debug_ptype == 0) {
666 arr[62] = 0x1; arr[63] = 0x80; /* SBC */ 1037 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
667 } else if (scsi_debug_ptype == 1) { 1038 } else if (scsi_debug_ptype == 1) {
668 arr[62] = 0x2; arr[63] = 0x00; /* SSC */ 1039 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
669 } 1040 }
1041 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
670 return fill_from_dev_buffer(scp, arr, 1042 return fill_from_dev_buffer(scp, arr,
671 min(alloc_len, SDEBUG_LONG_INQ_SZ)); 1043 min(alloc_len, SDEBUG_LONG_INQ_SZ));
672} 1044}
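One subtlety in the new EVPD tail: a VPD page length in bytes 2-3 excludes the 4-byte header, hence the + 4 before clamping against both the initiator's allocation length and the local array:

	/* e.g. the 0x89 (ATA information) page: 4 header bytes plus some
	 * 570 bytes of vpd89_data fit the enlarged 584-byte array, which
	 * is why SDEBUG_MAX_INQ_ARR_SZ grows from 128. */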
@@ -677,46 +1049,128 @@ static int resp_requests(struct scsi_cmnd * scp,
677 unsigned char * sbuff; 1049 unsigned char * sbuff;
678 unsigned char *cmd = (unsigned char *)scp->cmnd; 1050 unsigned char *cmd = (unsigned char *)scp->cmnd;
679 unsigned char arr[SDEBUG_SENSE_LEN]; 1051 unsigned char arr[SDEBUG_SENSE_LEN];
1052 int want_dsense;
680 int len = 18; 1053 int len = 18;
681 1054
682 memset(arr, 0, SDEBUG_SENSE_LEN); 1055 memset(arr, 0, sizeof(arr));
683 if (devip->reset == 1) 1056 if (devip->reset == 1)
684 mk_sense_buffer(devip, 0, NO_ADDED_SENSE, 0); 1057 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
1058 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
685 sbuff = devip->sense_buff; 1059 sbuff = devip->sense_buff;
686 if ((cmd[1] & 1) && (! scsi_debug_dsense)) { 1060 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
687 /* DESC bit set and sense_buff in fixed format */ 1061 if (want_dsense) {
688 arr[0] = 0x72; 1062 arr[0] = 0x72;
689 arr[1] = sbuff[2]; /* sense key */ 1063 arr[1] = 0x0; /* NO_SENSE in sense_key */
690 arr[2] = sbuff[12]; /* asc */ 1064 arr[2] = THRESHOLD_EXCEEDED;
691 arr[3] = sbuff[13]; /* ascq */ 1065 arr[3] = 0xff; /* TEST set and MRIE==6 */
692 len = 8; 1066 } else {
693 } else 1067 arr[0] = 0x70;
1068 arr[2] = 0x0; /* NO_SENSE in sense_key */
1069 arr[7] = 0xa; /* 18 byte sense buffer */
1070 arr[12] = THRESHOLD_EXCEEDED;
1071 arr[13] = 0xff; /* TEST set and MRIE==6 */
1072 }
1073 } else {
694 memcpy(arr, sbuff, SDEBUG_SENSE_LEN); 1074 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
695 mk_sense_buffer(devip, 0, NO_ADDED_SENSE, 0); 1075 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
1076 /* DESC bit set and sense_buff in fixed format */
1077 memset(arr, 0, sizeof(arr));
1078 arr[0] = 0x72;
1079 arr[1] = sbuff[2]; /* sense key */
1080 arr[2] = sbuff[12]; /* asc */
1081 arr[3] = sbuff[13]; /* ascq */
1082 len = 8;
1083 }
1084 }
1085 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
696 return fill_from_dev_buffer(scp, arr, len); 1086 return fill_from_dev_buffer(scp, arr, len);
697} 1087}
698 1088
1089static int resp_start_stop(struct scsi_cmnd * scp,
1090 struct sdebug_dev_info * devip)
1091{
1092 unsigned char *cmd = (unsigned char *)scp->cmnd;
1093 int power_cond, errsts, start;
1094
1095 if ((errsts = check_readiness(scp, 1, devip)))
1096 return errsts;
1097 power_cond = (cmd[4] & 0xf0) >> 4;
1098 if (power_cond) {
1099 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1100 0);
1101 return check_condition_result;
1102 }
1103 start = cmd[4] & 1;
1104 if (start == devip->stopped)
1105 devip->stopped = !start;
1106 return 0;
1107}
1108
699#define SDEBUG_READCAP_ARR_SZ 8 1109#define SDEBUG_READCAP_ARR_SZ 8
700static int resp_readcap(struct scsi_cmnd * scp, 1110static int resp_readcap(struct scsi_cmnd * scp,
701 struct sdebug_dev_info * devip) 1111 struct sdebug_dev_info * devip)
702{ 1112{
703 unsigned char arr[SDEBUG_READCAP_ARR_SZ]; 1113 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
704 unsigned long capac; 1114 unsigned int capac;
705 int errsts; 1115 int errsts;
706 1116
707 if ((errsts = check_reset(scp, devip))) 1117 if ((errsts = check_readiness(scp, 1, devip)))
708 return errsts; 1118 return errsts;
1119 /* following just in case virtual_gb changed */
1120 if (scsi_debug_virtual_gb > 0) {
1121 sdebug_capacity = 2048 * 1024;
1122 sdebug_capacity *= scsi_debug_virtual_gb;
1123 } else
1124 sdebug_capacity = sdebug_store_sectors;
709 memset(arr, 0, SDEBUG_READCAP_ARR_SZ); 1125 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
710 capac = (unsigned long)sdebug_capacity - 1; 1126 if (sdebug_capacity < 0xffffffff) {
711 arr[0] = (capac >> 24); 1127 capac = (unsigned int)sdebug_capacity - 1;
712 arr[1] = (capac >> 16) & 0xff; 1128 arr[0] = (capac >> 24);
713 arr[2] = (capac >> 8) & 0xff; 1129 arr[1] = (capac >> 16) & 0xff;
714 arr[3] = capac & 0xff; 1130 arr[2] = (capac >> 8) & 0xff;
1131 arr[3] = capac & 0xff;
1132 } else {
1133 arr[0] = 0xff;
1134 arr[1] = 0xff;
1135 arr[2] = 0xff;
1136 arr[3] = 0xff;
1137 }
715 arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; 1138 arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
716 arr[7] = SECT_SIZE_PER(target) & 0xff; 1139 arr[7] = SECT_SIZE_PER(target) & 0xff;
717 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); 1140 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
718} 1141}
719 1142
1143#define SDEBUG_READCAP16_ARR_SZ 32
1144static int resp_readcap16(struct scsi_cmnd * scp,
1145 struct sdebug_dev_info * devip)
1146{
1147 unsigned char *cmd = (unsigned char *)scp->cmnd;
1148 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1149 unsigned long long capac;
1150 int errsts, k, alloc_len;
1151
1152 if ((errsts = check_readiness(scp, 1, devip)))
1153 return errsts;
1154 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1155 + cmd[13]);
1156 /* following just in case virtual_gb changed */
1157 if (scsi_debug_virtual_gb > 0) {
1158 sdebug_capacity = 2048 * 1024;
1159 sdebug_capacity *= scsi_debug_virtual_gb;
1160 } else
1161 sdebug_capacity = sdebug_store_sectors;
1162 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1163 capac = sdebug_capacity - 1;
1164 for (k = 0; k < 8; ++k, capac >>= 8)
1165 arr[7 - k] = capac & 0xff;
1166 arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff;
1167 arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff;
1168 arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1169 arr[11] = SECT_SIZE_PER(target) & 0xff;
1170 return fill_from_dev_buffer(scp, arr,
1171 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1172}
1173
720/* <<Following mode page info copied from ST318451LW>> */ 1174/* <<Following mode page info copied from ST318451LW>> */
721 1175
722static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target) 1176static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
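resp_readcap16() just above writes the capacity with the mirror image of the CDB decode loop used in queuecommand; as a stand-alone sketch (hypothetical name; put_unaligned_be64() in later kernels):

	static void cdb_put_be64(unsigned char *p, unsigned long long v)
	{
		int k;

		for (k = 0; k < 8; ++k, v >>= 8)
			p[7 - k] = v & 0xff;
	}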
@@ -772,47 +1226,123 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
772 1226
773static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) 1227static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
774{ /* Control mode page for mode_sense */ 1228{ /* Control mode page for mode_sense */
775 unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 1229 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1230 0, 0, 0, 0};
1231 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
776 0, 0, 0x2, 0x4b}; 1232 0, 0, 0x2, 0x4b};
777 1233
778 if (scsi_debug_dsense) 1234 if (scsi_debug_dsense)
779 ctrl_m_pg[2] |= 0x4; 1235 ctrl_m_pg[2] |= 0x4;
1236 else
1237 ctrl_m_pg[2] &= ~0x4;
780 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); 1238 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
781 if (1 == pcontrol) 1239 if (1 == pcontrol)
782 memset(p + 2, 0, sizeof(ctrl_m_pg) - 2); 1240 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1241 else if (2 == pcontrol)
1242 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
783 return sizeof(ctrl_m_pg); 1243 return sizeof(ctrl_m_pg);
784} 1244}
785 1245
1246
786static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target) 1247static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
787{ /* Informational Exceptions control mode page for mode_sense */ 1248{ /* Informational Exceptions control mode page for mode_sense */
788 unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 1249 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
789 0, 0, 0x0, 0x0}; 1250 0, 0, 0x0, 0x0};
1251 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1252 0, 0, 0x0, 0x0};
1253
790 memcpy(p, iec_m_pg, sizeof(iec_m_pg)); 1254 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
791 if (1 == pcontrol) 1255 if (1 == pcontrol)
792 memset(p + 2, 0, sizeof(iec_m_pg) - 2); 1256 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1257 else if (2 == pcontrol)
1258 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
793 return sizeof(iec_m_pg); 1259 return sizeof(iec_m_pg);
794} 1260}
795 1261
1262static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1263{ /* SAS SSP mode page - short format for mode_sense */
1264 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1265 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1266
1267 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1268 if (1 == pcontrol)
1269 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1270 return sizeof(sas_sf_m_pg);
1271}
1272
1273
1274static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1275 int target_dev_id)
1276{ /* SAS phy control and discover mode page for mode_sense */
1277 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1278 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1279 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1280 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1281 0x2, 0, 0, 0, 0, 0, 0, 0,
1282 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1283 0, 0, 0, 0, 0, 0, 0, 0,
1284 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1285 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1286 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1287 0x3, 0, 0, 0, 0, 0, 0, 0,
1288 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1289 0, 0, 0, 0, 0, 0, 0, 0,
1290 };
1291 int port_a, port_b;
1292
1293 port_a = target_dev_id + 1;
1294 port_b = port_a + 1;
1295 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1296 p[20] = (port_a >> 24);
1297 p[21] = (port_a >> 16) & 0xff;
1298 p[22] = (port_a >> 8) & 0xff;
1299 p[23] = port_a & 0xff;
1300 p[48 + 20] = (port_b >> 24);
1301 p[48 + 21] = (port_b >> 16) & 0xff;
1302 p[48 + 22] = (port_b >> 8) & 0xff;
1303 p[48 + 23] = port_b & 0xff;
1304 if (1 == pcontrol)
1305 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1306 return sizeof(sas_pcd_m_pg);
1307}
1308
1309static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1310{ /* SAS SSP shared protocol specific port mode subpage */
1311 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1312 0, 0, 0, 0, 0, 0, 0, 0,
1313 };
1314
1315 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1316 if (1 == pcontrol)
1317 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1318 return sizeof(sas_sha_m_pg);
1319}
1320
796#define SDEBUG_MAX_MSENSE_SZ 256 1321#define SDEBUG_MAX_MSENSE_SZ 256
797 1322
798static int resp_mode_sense(struct scsi_cmnd * scp, int target, 1323static int resp_mode_sense(struct scsi_cmnd * scp, int target,
799 struct sdebug_dev_info * devip) 1324 struct sdebug_dev_info * devip)
800{ 1325{
801 unsigned char dbd; 1326 unsigned char dbd, llbaa;
802 int pcontrol, pcode, subpcode; 1327 int pcontrol, pcode, subpcode, bd_len;
803 unsigned char dev_spec; 1328 unsigned char dev_spec;
804 int alloc_len, msense_6, offset, len, errsts; 1329 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
805 unsigned char * ap; 1330 unsigned char * ap;
806 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1331 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
807 unsigned char *cmd = (unsigned char *)scp->cmnd; 1332 unsigned char *cmd = (unsigned char *)scp->cmnd;
808 1333
809 if ((errsts = check_reset(scp, devip))) 1334 if ((errsts = check_readiness(scp, 1, devip)))
810 return errsts; 1335 return errsts;
811 dbd = cmd[1] & 0x8; 1336 dbd = !!(cmd[1] & 0x8);
812 pcontrol = (cmd[2] & 0xc0) >> 6; 1337 pcontrol = (cmd[2] & 0xc0) >> 6;
813 pcode = cmd[2] & 0x3f; 1338 pcode = cmd[2] & 0x3f;
814 subpcode = cmd[3]; 1339 subpcode = cmd[3];
815 msense_6 = (MODE_SENSE == cmd[0]); 1340 msense_6 = (MODE_SENSE == cmd[0]);
1341 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1342 if ((0 == scsi_debug_ptype) && (0 == dbd))
1343 bd_len = llbaa ? 16 : 8;
1344 else
1345 bd_len = 0;
816 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); 1346 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
817 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); 1347 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
818 if (0x3 == pcontrol) { /* Saving values not supported */ 1348 if (0x3 == pcontrol) { /* Saving values not supported */
@@ -820,17 +1350,63 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
820 0); 1350 0);
821 return check_condition_result; 1351 return check_condition_result;
822 } 1352 }
823 dev_spec = DEV_READONLY(target) ? 0x80 : 0x0; 1353 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1354 (devip->target * 1000) - 3;
1355 /* set DPOFUA bit for disks */
1356 if (0 == scsi_debug_ptype)
1357 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1358 else
1359 dev_spec = 0x0;
824 if (msense_6) { 1360 if (msense_6) {
825 arr[2] = dev_spec; 1361 arr[2] = dev_spec;
1362 arr[3] = bd_len;
826 offset = 4; 1363 offset = 4;
827 } else { 1364 } else {
828 arr[3] = dev_spec; 1365 arr[3] = dev_spec;
1366 if (16 == bd_len)
1367 arr[4] = 0x1; /* set LONGLBA bit */
1368 arr[7] = bd_len; /* assume 255 or less */
829 offset = 8; 1369 offset = 8;
830 } 1370 }
831 ap = arr + offset; 1371 ap = arr + offset;
1372 if ((bd_len > 0) && (0 == sdebug_capacity)) {
1373 if (scsi_debug_virtual_gb > 0) {
1374 sdebug_capacity = 2048 * 1024;
1375 sdebug_capacity *= scsi_debug_virtual_gb;
1376 } else
1377 sdebug_capacity = sdebug_store_sectors;
1378 }
1379 if (8 == bd_len) {
1380 if (sdebug_capacity > 0xfffffffe) {
1381 ap[0] = 0xff;
1382 ap[1] = 0xff;
1383 ap[2] = 0xff;
1384 ap[3] = 0xff;
1385 } else {
1386 ap[0] = (sdebug_capacity >> 24) & 0xff;
1387 ap[1] = (sdebug_capacity >> 16) & 0xff;
1388 ap[2] = (sdebug_capacity >> 8) & 0xff;
1389 ap[3] = sdebug_capacity & 0xff;
1390 }
1391 ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1392 ap[7] = SECT_SIZE_PER(target) & 0xff;
1393 offset += bd_len;
1394 ap = arr + offset;
1395 } else if (16 == bd_len) {
1396 unsigned long long capac = sdebug_capacity;
1397
1398 for (k = 0; k < 8; ++k, capac >>= 8)
1399 ap[7 - k] = capac & 0xff;
1400 ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
1401 ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
1402 ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1403 ap[15] = SECT_SIZE_PER(target) & 0xff;
1404 offset += bd_len;
1405 ap = arr + offset;
1406 }
832 1407
833 if (0 != subpcode) { /* TODO: Control Extension page */ 1408 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1409 /* TODO: Control Extension page */
834 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1410 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
835 0); 1411 0);
836 return check_condition_result; 1412 return check_condition_result;
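The block descriptors filled in above follow the two SBC shapes; for reference (a reading of the code, field positions per SPC-3):

	/* short (8-byte) descriptor          long-LBA (16-byte) descriptor
	 *  bytes 0-3  number of blocks        bytes 0-7   number of blocks
	 *  byte  4    density code            bytes 8-11  reserved
	 *  bytes 5-7  block length            bytes 12-15 block length   */

The 16-byte form is only offered on MODE SENSE(10) when the initiator sets LLBAA, and arr[4] = 0x1 flags it via the LONGLBA bit.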
@@ -856,17 +1432,45 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
856 len = resp_ctrl_m_pg(ap, pcontrol, target); 1432 len = resp_ctrl_m_pg(ap, pcontrol, target);
857 offset += len; 1433 offset += len;
858 break; 1434 break;
1435 case 0x19: /* if spc==1 then sas phy, control+discover */
1436 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1437 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1438 INVALID_FIELD_IN_CDB, 0);
1439 return check_condition_result;
1440 }
1441 len = 0;
1442 if ((0x0 == subpcode) || (0xff == subpcode))
1443 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1444 if ((0x1 == subpcode) || (0xff == subpcode))
1445 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1446 target_dev_id);
1447 if ((0x2 == subpcode) || (0xff == subpcode))
1448 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1449 offset += len;
1450 break;
859 case 0x1c: /* Informational Exceptions Mode page, all devices */ 1451 case 0x1c: /* Informational Exceptions Mode page, all devices */
860 len = resp_iec_m_pg(ap, pcontrol, target); 1452 len = resp_iec_m_pg(ap, pcontrol, target);
861 offset += len; 1453 offset += len;
862 break; 1454 break;
863 case 0x3f: /* Read all Mode pages */ 1455 case 0x3f: /* Read all Mode pages */
864 len = resp_err_recov_pg(ap, pcontrol, target); 1456 if ((0 == subpcode) || (0xff == subpcode)) {
865 len += resp_disconnect_pg(ap + len, pcontrol, target); 1457 len = resp_err_recov_pg(ap, pcontrol, target);
866 len += resp_format_pg(ap + len, pcontrol, target); 1458 len += resp_disconnect_pg(ap + len, pcontrol, target);
867 len += resp_caching_pg(ap + len, pcontrol, target); 1459 len += resp_format_pg(ap + len, pcontrol, target);
868 len += resp_ctrl_m_pg(ap + len, pcontrol, target); 1460 len += resp_caching_pg(ap + len, pcontrol, target);
869 len += resp_iec_m_pg(ap + len, pcontrol, target); 1461 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1462 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1463 if (0xff == subpcode) {
1464 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1465 target, target_dev_id);
1466 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1467 }
1468 len += resp_iec_m_pg(ap + len, pcontrol, target);
1469 } else {
1470 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1471 INVALID_FIELD_IN_CDB, 0);
1472 return check_condition_result;
1473 }
870 offset += len; 1474 offset += len;
871 break; 1475 break;
872 default: 1476 default:
@@ -883,71 +1487,314 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
883 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); 1487 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
884} 1488}
885 1489
886static int resp_read(struct scsi_cmnd * SCpnt, int upper_blk, int block, 1490#define SDEBUG_MAX_MSELECT_SZ 512
887 int num, struct sdebug_dev_info * devip) 1491
1492static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1493 struct sdebug_dev_info * devip)
1494{
1495 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1496 int param_len, res, errsts, mpage;
1497 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1498 unsigned char *cmd = (unsigned char *)scp->cmnd;
1499
1500 if ((errsts = check_readiness(scp, 1, devip)))
1501 return errsts;
1502 memset(arr, 0, sizeof(arr));
1503 pf = cmd[1] & 0x10;
1504 sp = cmd[1] & 0x1;
1505 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1506 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1507 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1508 INVALID_FIELD_IN_CDB, 0);
1509 return check_condition_result;
1510 }
1511 res = fetch_to_dev_buffer(scp, arr, param_len);
1512 if (-1 == res)
1513 return (DID_ERROR << 16);
1514 else if ((res < param_len) &&
1515 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1516 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1517 " IO sent=%d bytes\n", param_len, res);
1518 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1519 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1520 if (md_len > 2) {
1521 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1522 INVALID_FIELD_IN_PARAM_LIST, 0);
1523 return check_condition_result;
1524 }
1525 off = bd_len + (mselect6 ? 4 : 8);
1526 mpage = arr[off] & 0x3f;
1527 ps = !!(arr[off] & 0x80);
1528 if (ps) {
1529 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1530 INVALID_FIELD_IN_PARAM_LIST, 0);
1531 return check_condition_result;
1532 }
1533 spf = !!(arr[off] & 0x40);
1534 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1535 (arr[off + 1] + 2);
1536 if ((pg_len + off) > param_len) {
1537 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1538 PARAMETER_LIST_LENGTH_ERR, 0);
1539 return check_condition_result;
1540 }
1541 switch (mpage) {
1542 case 0xa: /* Control Mode page */
1543 if (ctrl_m_pg[1] == arr[off + 1]) {
1544 memcpy(ctrl_m_pg + 2, arr + off + 2,
1545 sizeof(ctrl_m_pg) - 2);
1546 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1547 return 0;
1548 }
1549 break;
1550 case 0x1c: /* Informational Exceptions Mode page */
1551 if (iec_m_pg[1] == arr[off + 1]) {
1552 memcpy(iec_m_pg + 2, arr + off + 2,
1553 sizeof(iec_m_pg) - 2);
1554 return 0;
1555 }
1556 break;
1557 default:
1558 break;
1559 }
1560 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1561 INVALID_FIELD_IN_PARAM_LIST, 0);
1562 return check_condition_result;
1563}
1564
1565static int resp_temp_l_pg(unsigned char * arr)
1566{
1567 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1568 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1569 };
1570
1571 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1572 return sizeof(temp_l_pg);
1573}
1574
1575static int resp_ie_l_pg(unsigned char * arr)
1576{
1577 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1578 };
1579
1580 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1581 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1582 arr[4] = THRESHOLD_EXCEEDED;
1583 arr[5] = 0xff;
1584 }
1585 return sizeof(ie_l_pg);
1586}
1587
1588#define SDEBUG_MAX_LSENSE_SZ 512
1589
1590static int resp_log_sense(struct scsi_cmnd * scp,
1591 struct sdebug_dev_info * devip)
1592{
1593 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1594 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1595 unsigned char *cmd = (unsigned char *)scp->cmnd;
1596
1597 if ((errsts = check_readiness(scp, 1, devip)))
1598 return errsts;
1599 memset(arr, 0, sizeof(arr));
1600 ppc = cmd[1] & 0x2;
1601 sp = cmd[1] & 0x1;
1602 if (ppc || sp) {
1603 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1604 INVALID_FIELD_IN_CDB, 0);
1605 return check_condition_result;
1606 }
1607 pcontrol = (cmd[2] & 0xc0) >> 6;
1608 pcode = cmd[2] & 0x3f;
1609 subpcode = cmd[3] & 0xff;
1610 alloc_len = (cmd[7] << 8) + cmd[8];
1611 arr[0] = pcode;
1612 if (0 == subpcode) {
1613 switch (pcode) {
1614 case 0x0: /* Supported log pages log page */
1615 n = 4;
1616 arr[n++] = 0x0; /* this page */
1617 arr[n++] = 0xd; /* Temperature */
1618 arr[n++] = 0x2f; /* Informational exceptions */
1619 arr[3] = n - 4;
1620 break;
1621 case 0xd: /* Temperature log page */
1622 arr[3] = resp_temp_l_pg(arr + 4);
1623 break;
1624 case 0x2f: /* Informational exceptions log page */
1625 arr[3] = resp_ie_l_pg(arr + 4);
1626 break;
1627 default:
1628 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1629 INVALID_FIELD_IN_CDB, 0);
1630 return check_condition_result;
1631 }
1632 } else if (0xff == subpcode) {
1633 arr[0] |= 0x40;
1634 arr[1] = subpcode;
1635 switch (pcode) {
1636 case 0x0: /* Supported log pages and subpages log page */
1637 n = 4;
1638 arr[n++] = 0x0;
1639 arr[n++] = 0x0; /* 0,0 page */
1640 arr[n++] = 0x0;
1641 arr[n++] = 0xff; /* this page */
1642 arr[n++] = 0xd;
1643 arr[n++] = 0x0; /* Temperature */
1644 arr[n++] = 0x2f;
1645 arr[n++] = 0x0; /* Informational exceptions */
1646 arr[3] = n - 4;
1647 break;
1648 case 0xd: /* Temperature subpages */
1649 n = 4;
1650 arr[n++] = 0xd;
1651 arr[n++] = 0x0; /* Temperature */
1652 arr[3] = n - 4;
1653 break;
1654 case 0x2f: /* Informational exceptions subpages */
1655 n = 4;
1656 arr[n++] = 0x2f;
1657 arr[n++] = 0x0; /* Informational exceptions */
1658 arr[3] = n - 4;
1659 break;
1660 default:
1661 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1662 INVALID_FIELD_IN_CDB, 0);
1663 return check_condition_result;
1664 }
1665 } else {
1666 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1667 INVALID_FIELD_IN_CDB, 0);
1668 return check_condition_result;
1669 }
1670 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1671 return fill_from_dev_buffer(scp, arr,
1672 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1673}
1674
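For readers following the field extraction above: LOG SENSE is a 10-byte CDB whose byte 1 carries the PPC and SP bits (both rejected here), byte 2 packs the page control into its top two bits and the page code into the low six, byte 3 is the subpage code, and bytes 7-8 hold the big-endian allocation length. A condensed sketch, assuming cdb points at the 10-byte command:

	int ppc       = cdb[1] & 0x2;		/* Parameter Pointer Control */
	int sp        = cdb[1] & 0x1;		/* Save Parameters */
	int pcontrol  = (cdb[2] & 0xc0) >> 6;	/* page control */
	int pcode     = cdb[2] & 0x3f;		/* page code */
	int subpcode  = cdb[3];			/* subpage code */
	int alloc_len = (cdb[7] << 8) + cdb[8];	/* allocation length */
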
1675static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba,
1676 unsigned int num, struct sdebug_dev_info * devip)
888{ 1677{
889 unsigned long iflags; 1678 unsigned long iflags;
1679 unsigned int block, from_bottom;
1680 unsigned long long u;
890 int ret; 1681 int ret;
891 1682
892 if (upper_blk || (block + num > sdebug_capacity)) { 1683 if (lba + num > sdebug_capacity) {
893 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 1684 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE,
894 0); 1685 0);
895 return check_condition_result; 1686 return check_condition_result;
896 } 1687 }
1688 /* transfer length excessive (tie in to block limits VPD page) */
1689 if (num > sdebug_store_sectors) {
1690 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1691 0);
1692 return check_condition_result;
1693 }
897 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && 1694 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
898 (block <= OPT_MEDIUM_ERR_ADDR) && 1695 (lba <= OPT_MEDIUM_ERR_ADDR) &&
899 ((block + num) > OPT_MEDIUM_ERR_ADDR)) { 1696 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1697 /* claim unrecoverable read error */
900 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 1698 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
901 0); 1699 0);
902 /* claim unrecoverable read error */ 1700 /* set info field and valid bit for fixed descriptor */
1701 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1702 devip->sense_buff[0] |= 0x80; /* Valid bit */
1703 ret = OPT_MEDIUM_ERR_ADDR;
1704 devip->sense_buff[3] = (ret >> 24) & 0xff;
1705 devip->sense_buff[4] = (ret >> 16) & 0xff;
1706 devip->sense_buff[5] = (ret >> 8) & 0xff;
1707 devip->sense_buff[6] = ret & 0xff;
1708 }
903 return check_condition_result; 1709 return check_condition_result;
904 } 1710 }
905 read_lock_irqsave(&atomic_rw, iflags); 1711 read_lock_irqsave(&atomic_rw, iflags);
906 ret = fill_from_dev_buffer(SCpnt, fake_storep + (block * SECT_SIZE), 1712 if ((lba + num) <= sdebug_store_sectors)
907 num * SECT_SIZE); 1713 ret = fill_from_dev_buffer(SCpnt,
1714 fake_storep + (lba * SECT_SIZE),
1715 num * SECT_SIZE);
1716 else {
1717 /* modulo when one arg is 64 bits needs do_div() */
1718 u = lba;
1719 block = do_div(u, sdebug_store_sectors);
1720 from_bottom = 0;
1721 if ((block + num) > sdebug_store_sectors)
1722 from_bottom = (block + num) - sdebug_store_sectors;
1723 ret = fill_from_dev_buffer(SCpnt,
1724 fake_storep + (block * SECT_SIZE),
1725 (num - from_bottom) * SECT_SIZE);
1726 if ((0 == ret) && (from_bottom > 0))
1727 ret = fill_from_dev_buffer(SCpnt, fake_storep,
1728 from_bottom * SECT_SIZE);
1729 }
908 read_unlock_irqrestore(&atomic_rw, iflags); 1730 read_unlock_irqrestore(&atomic_rw, iflags);
909 return ret; 1731 return ret;
910} 1732}
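The medium-error branch above fills in fixed-format sense data by hand: byte 0 holds the response code (0x70, current errors) plus the VALID bit (0x80), and bytes 3-6 hold the big-endian LBA of the failing block in the INFORMATION field. The same fragment in isolation, assuming sense points at the sense buffer and lba is the bad block:

	sense[0] |= 0x80;		/* VALID: INFORMATION field is set */
	sense[3] = (lba >> 24) & 0xff;	/* INFORMATION: big-endian LBA */
	sense[4] = (lba >> 16) & 0xff;
	sense[5] = (lba >> 8) & 0xff;
	sense[6] = lba & 0xff;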
911 1733
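do_div() (from asm/div64.h) is the kernel's 64-by-32 division helper: it replaces its 64-bit lvalue argument with the quotient and returns the 32-bit remainder, which is how the code above gets lba % sdebug_store_sectors back into block. A condensed sketch of the wrap-around mapping shared by resp_read() and resp_write(), under the same variable names:

	unsigned long long u = lba;
	unsigned int block = do_div(u, sdebug_store_sectors);
	unsigned int wrap = 0;		/* from_bottom/to_bottom above */

	if (block + num > sdebug_store_sectors)	/* crosses end of store */
		wrap = (block + num) - sdebug_store_sectors;
	/* piece 1: (num - wrap) sectors starting at block;
	 * piece 2: wrap sectors starting again at sector 0 */
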
912static int resp_write(struct scsi_cmnd * SCpnt, int upper_blk, int block, 1734static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
913 int num, struct sdebug_dev_info * devip) 1735 unsigned int num, struct sdebug_dev_info * devip)
914{ 1736{
915 unsigned long iflags; 1737 unsigned long iflags;
1738 unsigned int block, to_bottom;
1739 unsigned long long u;
916 int res; 1740 int res;
917 1741
918 if (upper_blk || (block + num > sdebug_capacity)) { 1742 if (lba + num > sdebug_capacity) {
919 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 1743 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE,
920 0); 1744 0);
921 return check_condition_result; 1745 return check_condition_result;
922 } 1746 }
1747 /* transfer length excessive (tie in to block limits VPD page) */
1748 if (num > sdebug_store_sectors) {
1749 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1750 0);
1751 return check_condition_result;
1752 }
923 1753
924 write_lock_irqsave(&atomic_rw, iflags); 1754 write_lock_irqsave(&atomic_rw, iflags);
925 res = fetch_to_dev_buffer(SCpnt, fake_storep + (block * SECT_SIZE), 1755 if ((lba + num) <= sdebug_store_sectors)
926 num * SECT_SIZE); 1756 res = fetch_to_dev_buffer(SCpnt,
1757 fake_storep + (lba * SECT_SIZE),
1758 num * SECT_SIZE);
1759 else {
1760 /* modulo when one arg is 64 bits needs do_div() */
1761 u = lba;
1762 block = do_div(u, sdebug_store_sectors);
1763 to_bottom = 0;
1764 if ((block + num) > sdebug_store_sectors)
1765 to_bottom = (block + num) - sdebug_store_sectors;
1766 res = fetch_to_dev_buffer(SCpnt,
1767 fake_storep + (block * SECT_SIZE),
1768 (num - to_bottom) * SECT_SIZE);
1769 if ((0 == res) && (to_bottom > 0))
1770 res = fetch_to_dev_buffer(SCpnt, fake_storep,
1771 to_bottom * SECT_SIZE);
1772 }
927 write_unlock_irqrestore(&atomic_rw, iflags); 1773 write_unlock_irqrestore(&atomic_rw, iflags);
928 if (-1 == res) 1774 if (-1 == res)
929 return (DID_ERROR << 16); 1775 return (DID_ERROR << 16);
930 else if ((res < (num * SECT_SIZE)) && 1776 else if ((res < (num * SECT_SIZE)) &&
931 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 1777 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
932 printk(KERN_INFO "scsi_debug: write: cdb indicated=%d, " 1778 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
933 "IO sent=%d bytes\n", num * SECT_SIZE, res); 1779 "IO sent=%d bytes\n", num * SECT_SIZE, res);
934 return 0; 1780 return 0;
935} 1781}
936 1782
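The DID_ERROR << 16 return above relies on the classic layout of the SCSI midlayer result word, in which each byte has an owner; the host_byte() test further down in this patch unpacks the same field. For reference:

	/* cmd->result packs four bytes (classic encoding):
	 *   bits  0..7  SCSI status byte
	 *   bits  8..15 message byte
	 *   bits 16..23 host byte   (DID_ERROR, DID_RESET, ...)
	 *   bits 24..31 driver byte (DRIVER_SENSE, ...)
	 * so host_byte(result) == ((result) >> 16) & 0xff. */
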
937#define SDEBUG_RLUN_ARR_SZ 128 1783#define SDEBUG_RLUN_ARR_SZ 256
938 1784
939static int resp_report_luns(struct scsi_cmnd * scp, 1785static int resp_report_luns(struct scsi_cmnd * scp,
940 struct sdebug_dev_info * devip) 1786 struct sdebug_dev_info * devip)
941{ 1787{
942 unsigned int alloc_len; 1788 unsigned int alloc_len;
943 int lun_cnt, i, upper; 1789 int lun_cnt, i, upper, num, n, wlun, lun;
944 unsigned char *cmd = (unsigned char *)scp->cmnd; 1790 unsigned char *cmd = (unsigned char *)scp->cmnd;
945 int select_report = (int)cmd[2]; 1791 int select_report = (int)cmd[2];
946 struct scsi_lun *one_lun; 1792 struct scsi_lun *one_lun;
947 unsigned char arr[SDEBUG_RLUN_ARR_SZ]; 1793 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
1794 unsigned char * max_addr;
948 1795
949 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); 1796 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
950 if ((alloc_len < 16) || (select_report > 2)) { 1797 if ((alloc_len < 4) || (select_report > 2)) {
951 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1798 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
952 0); 1799 0);
953 return check_condition_result; 1800 return check_condition_result;
@@ -955,18 +1802,37 @@ static int resp_report_luns(struct scsi_cmnd * scp,
955 /* can produce response with up to 16k luns (lun 0 to lun 16383) */ 1802 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
956 memset(arr, 0, SDEBUG_RLUN_ARR_SZ); 1803 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
957 lun_cnt = scsi_debug_max_luns; 1804 lun_cnt = scsi_debug_max_luns;
958 arr[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff; 1805 if (1 == select_report)
959 arr[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff; 1806 lun_cnt = 0;
960 lun_cnt = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / 1807 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
961 sizeof(struct scsi_lun)), lun_cnt); 1808 --lun_cnt;
1809 wlun = (select_report > 0) ? 1 : 0;
1810 num = lun_cnt + wlun;
1811 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
1812 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
1813 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
1814 sizeof(struct scsi_lun)), num);
1815 if (n < num) {
1816 wlun = 0;
1817 lun_cnt = n;
1818 }
962 one_lun = (struct scsi_lun *) &arr[8]; 1819 one_lun = (struct scsi_lun *) &arr[8];
963 for (i = 0; i < lun_cnt; i++) { 1820 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
964 upper = (i >> 8) & 0x3f; 1821 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
1822 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
1823 i++, lun++) {
1824 upper = (lun >> 8) & 0x3f;
965 if (upper) 1825 if (upper)
966 one_lun[i].scsi_lun[0] = 1826 one_lun[i].scsi_lun[0] =
967 (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); 1827 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
968 one_lun[i].scsi_lun[1] = i & 0xff; 1828 one_lun[i].scsi_lun[1] = lun & 0xff;
969 } 1829 }
1830 if (wlun) {
1831 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
1832 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
1833 i++;
1834 }
1835 alloc_len = (unsigned char *)(one_lun + i) - arr;
970 return fill_from_dev_buffer(scp, arr, 1836 return fill_from_dev_buffer(scp, arr,
971 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ)); 1837 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
972} 1838}
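The loop above emits each LUN in SAM-2 form: the first of the eight LUN bytes carries the address-method bits in its top two bits with the high six bits of the LUN below them, the second byte carries the low eight bits, and the rest stay zero. The REPORT LUNS well-known LUN appended afterwards (SAM2_WLUN_REPORT_LUNS, 0xc101) lands in the same two bytes with the well-known-LUN address method in the top bits. An illustrative helper, not part of the driver (assumes string.h for memset):

	static void encode_sam2_lun(unsigned char field[8], unsigned int lun)
	{
		memset(field, 0, 8);
		if (lun & ~0xffU)	/* needs the upper bits of byte 0 */
			field[0] = ((lun >> 8) & 0x3f) |
				   (SAM2_LUN_ADDRESS_METHOD << 6);
		field[1] = lun & 0xff;
	}
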
@@ -1002,7 +1868,8 @@ static void timer_intr_handler(unsigned long indx)
1002static int scsi_debug_slave_alloc(struct scsi_device * sdp) 1868static int scsi_debug_slave_alloc(struct scsi_device * sdp)
1003{ 1869{
1004 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1870 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1005 sdev_printk(KERN_INFO, sdp, "scsi_debug: slave_alloc\n"); 1871 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
1872 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1006 return 0; 1873 return 0;
1007} 1874}
1008 1875
@@ -1011,7 +1878,8 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp)
1011 struct sdebug_dev_info * devip; 1878 struct sdebug_dev_info * devip;
1012 1879
1013 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1880 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1014 sdev_printk(KERN_INFO, sdp, "scsi_debug: slave_configure\n"); 1881 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
1882 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1015 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) 1883 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
1016 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; 1884 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
1017 devip = devInfoReg(sdp); 1885 devip = devInfoReg(sdp);
@@ -1019,6 +1887,7 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp)
1019 if (sdp->host->cmd_per_lun) 1887 if (sdp->host->cmd_per_lun)
1020 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, 1888 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
1021 sdp->host->cmd_per_lun); 1889 sdp->host->cmd_per_lun);
1890 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
1022 return 0; 1891 return 0;
1023} 1892}
1024 1893
@@ -1028,7 +1897,8 @@ static void scsi_debug_slave_destroy(struct scsi_device * sdp)
1028 (struct sdebug_dev_info *)sdp->hostdata; 1897 (struct sdebug_dev_info *)sdp->hostdata;
1029 1898
1030 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1899 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1031 sdev_printk(KERN_INFO, sdp, "scsi_debug: slave_destroy\n"); 1900 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
1901 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1032 if (devip) { 1902 if (devip) {
1033 /* make this slot available for re-use */ 1903 /* make this slot available for re-use */
1034 devip->used = 0; 1904 devip->used = 0;
@@ -1085,6 +1955,8 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
1085 open_devip->sense_buff[0] = 0x70; 1955 open_devip->sense_buff[0] = 0x70;
1086 open_devip->sense_buff[7] = 0xa; 1956 open_devip->sense_buff[7] = 0xa;
1087 } 1957 }
1958 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
1959 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
1088 return open_devip; 1960 return open_devip;
1089 } 1961 }
1090 return NULL; 1962 return NULL;
@@ -1273,7 +2145,7 @@ static void __init sdebug_build_parts(unsigned char * ramp)
1273 printk(KERN_WARNING "scsi_debug:build_parts: reducing " 2145 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
1274 "partitions to %d\n", SDEBUG_MAX_PARTS); 2146 "partitions to %d\n", SDEBUG_MAX_PARTS);
1275 } 2147 }
1276 num_sectors = (int)(sdebug_store_size / SECT_SIZE); 2148 num_sectors = (int)sdebug_store_sectors;
1277 sectors_per_part = (num_sectors - sdebug_sectors_per) 2149 sectors_per_part = (num_sectors - sdebug_sectors_per)
1278 / scsi_debug_num_parts; 2150 / scsi_debug_num_parts;
1279 heads_by_sects = sdebug_heads * sdebug_sectors_per; 2151 heads_by_sects = sdebug_heads * sdebug_sectors_per;
@@ -1316,9 +2188,9 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
1316 if (scsi_result) { 2188 if (scsi_result) {
1317 struct scsi_device * sdp = cmnd->device; 2189 struct scsi_device * sdp = cmnd->device;
1318 2190
1319 sdev_printk(KERN_INFO, sdp, 2191 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
1320 "non-zero result=0x%x\n", 2192 "non-zero result=0x%x\n", sdp->host->host_no,
1321 scsi_result); 2193 sdp->channel, sdp->id, sdp->lun, scsi_result);
1322 } 2194 }
1323 } 2195 }
1324 if (cmnd && devip) { 2196 if (cmnd && devip) {
@@ -1365,21 +2237,28 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
1365 } 2237 }
1366} 2238}
1367 2239
1368/* Set 'perm' (4th argument) to 0 to disable module_param's definition 2240/* Note: The following macros create attribute files in the
1369 * of sysfs parameters (which module_param doesn't yet support). 2241 /sys/module/scsi_debug/parameters directory. Unfortunately this
1370 * Sysfs parameters defined explicitly below. 2242 driver is unaware of a change and cannot trigger auxiliary actions
2243 as it can when the corresponding attribute in the
2244 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
1371 */ 2245 */
1372module_param_named(add_host, scsi_debug_add_host, int, 0); /* perm=0644 */ 2246module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
1373module_param_named(delay, scsi_debug_delay, int, 0); /* perm=0644 */ 2247module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
1374module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, 0); 2248module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
1375module_param_named(dsense, scsi_debug_dsense, int, 0); 2249module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
1376module_param_named(every_nth, scsi_debug_every_nth, int, 0); 2250module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
1377module_param_named(max_luns, scsi_debug_max_luns, int, 0); 2251module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
1378module_param_named(num_parts, scsi_debug_num_parts, int, 0); 2252module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
1379module_param_named(num_tgts, scsi_debug_num_tgts, int, 0); 2253module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
1380module_param_named(opts, scsi_debug_opts, int, 0); /* perm=0644 */ 2254module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
1381module_param_named(ptype, scsi_debug_ptype, int, 0); 2255module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
1382module_param_named(scsi_level, scsi_debug_scsi_level, int, 0); 2256module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2257module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2258module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2259module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2260module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2261 S_IRUGO | S_IWUSR);
1383 2262
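The new permission arguments replace the old literal 0: S_IRUGO makes a parameter world-readable under /sys/module/scsi_debug/parameters, and adding S_IWUSR lets root write it, giving the 0644 mode the old comments promised. For reference:

	/* from <linux/stat.h>:
	 * S_IRUGO           = S_IRUSR|S_IRGRP|S_IROTH = 0444
	 * S_IRUGO | S_IWUSR = 0644
	 * (read-only parameters such as dev_size_mb keep plain S_IRUGO) */
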
1384MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2263MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
1385MODULE_DESCRIPTION("SCSI debug adapter driver"); 2264MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -1388,15 +2267,19 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);
1388 2267
1389MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); 2268MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
1390MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); 2269MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
1391MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs"); 2270MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
1392MODULE_PARM_DESC(dsense, "use descriptor sense format(def: fixed)"); 2271MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
1393MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)"); 2272MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)");
1394MODULE_PARM_DESC(max_luns, "number of SCSI LUNs per target to simulate"); 2273MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2274MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2275MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
1395MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); 2276MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
1396MODULE_PARM_DESC(num_tgts, "number of SCSI targets per host to simulate"); 2277MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
1397MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->..."); 2278MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)");
1398MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 2279MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
1399MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2280MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2281MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2282MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
1400 2283
1401 2284
1402static char sdebug_info[256]; 2285static char sdebug_info[256];
@@ -1548,6 +2431,42 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp,
1548DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, 2431DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
1549 sdebug_dsense_store); 2432 sdebug_dsense_store);
1550 2433
2434static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2435{
2436 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2437}
2438static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2439 const char * buf, size_t count)
2440{
2441 int n;
2442
2443 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2444 scsi_debug_fake_rw = n;
2445 return count;
2446 }
2447 return -EINVAL;
2448}
2449DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2450 sdebug_fake_rw_store);
2451
2452static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2453{
2454 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2455}
2456static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2457 const char * buf, size_t count)
2458{
2459 int n;
2460
2461 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2462 scsi_debug_no_lun_0 = n;
2463 return count;
2464 }
2465 return -EINVAL;
2466}
2467DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2468 sdebug_no_lun_0_store);
2469
1551static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf) 2470static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
1552{ 2471{
1553 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); 2472 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
@@ -1623,6 +2542,29 @@ static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
1623} 2542}
1624DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL); 2543DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
1625 2544
2545static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
2546{
2547 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
2548}
2549static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
2550 const char * buf, size_t count)
2551{
2552 int n;
2553
2554 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2555 scsi_debug_virtual_gb = n;
2556 if (scsi_debug_virtual_gb > 0) {
2557 sdebug_capacity = 2048 * 1024;
2558 sdebug_capacity *= scsi_debug_virtual_gb;
2559 } else
2560 sdebug_capacity = sdebug_store_sectors;
2561 return count;
2562 }
2563 return -EINVAL;
2564}
2565DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
2566 sdebug_virtual_gb_store);
2567
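The 2048 * 1024 constant is one gigabyte expressed in 512-byte sectors, so the attribute advertises a capacity of virtual_gb gigabytes while the RAM backing store stays at sdebug_store_sectors. The arithmetic, assuming SECT_SIZE is 512:

	/* sectors per GiB = (1024 * 1024 * 1024) / 512
	 *                 = 2097152 = 2048 * 1024,
	 * so sdebug_capacity = virtual_gb * 2097152 sectors. */
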
1626static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf) 2568static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
1627{ 2569{
1628 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); 2570 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
@@ -1660,29 +2602,65 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
1660DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2602DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
1661 sdebug_add_host_store); 2603 sdebug_add_host_store);
1662 2604
1663static void do_create_driverfs_files(void) 2605static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
2606 char * buf)
1664{ 2607{
1665 driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); 2608 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
1666 driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); 2609}
1667 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2610static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
1668 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2611 const char * buf, size_t count)
1669 driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2612{
1670 driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2613 int n;
1671 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2614
1672 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2615 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
1673 driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2616 scsi_debug_vpd_use_hostno = n;
1674 driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2617 return count;
1675 driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2618 }
2619 return -EINVAL;
2620}
2621DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
2622 sdebug_vpd_use_hostno_store);
2623
2624/* Note: The following function creates attribute files in the
2625 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
2626 files (over those found in the /sys/module/scsi_debug/parameters
2627 directory) is that auxiliary actions can be triggered when an attribute
2628 is changed. For example, see sdebug_add_host_store() above.
2629 */
2630static int do_create_driverfs_files(void)
2631{
2632 int ret;
2633
2634 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
2635 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
2636 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
2637 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2638 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2639 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
2640 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2641 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
2642 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2643 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2644 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2645 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
2646 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2647 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2648 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2649 return ret;
1676} 2650}
1677 2651
1678static void do_remove_driverfs_files(void) 2652static void do_remove_driverfs_files(void)
1679{ 2653{
2654 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2655 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
1680 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2656 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
1681 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); 2657 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
1682 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2658 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
1683 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
1684 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2659 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2660 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2661 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
1685 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2662 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2663 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
1686 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2664 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
1687 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2665 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
1688 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2666 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
@@ -1692,14 +2670,20 @@ static void do_remove_driverfs_files(void)
1692 2670
1693static int __init scsi_debug_init(void) 2671static int __init scsi_debug_init(void)
1694{ 2672{
1695 unsigned long sz; 2673 unsigned int sz;
1696 int host_to_add; 2674 int host_to_add;
1697 int k; 2675 int k;
2676 int ret;
1698 2677
1699 if (scsi_debug_dev_size_mb < 1) 2678 if (scsi_debug_dev_size_mb < 1)
1700 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2679 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
1701 sdebug_store_size = (unsigned long)scsi_debug_dev_size_mb * 1048576; 2680 sdebug_store_size = (unsigned int)scsi_debug_dev_size_mb * 1048576;
1702 sdebug_capacity = sdebug_store_size / SECT_SIZE; 2681 sdebug_store_sectors = sdebug_store_size / SECT_SIZE;
2682 if (scsi_debug_virtual_gb > 0) {
2683 sdebug_capacity = 2048 * 1024;
2684 sdebug_capacity *= scsi_debug_virtual_gb;
2685 } else
2686 sdebug_capacity = sdebug_store_sectors;
1703 2687
1704 /* play around with geometry, don't waste too much on track 0 */ 2688 /* play around with geometry, don't waste too much on track 0 */
1705 sdebug_heads = 8; 2689 sdebug_heads = 8;
@@ -1728,12 +2712,32 @@ static int __init scsi_debug_init(void)
1728 if (scsi_debug_num_parts > 0) 2712 if (scsi_debug_num_parts > 0)
1729 sdebug_build_parts(fake_storep); 2713 sdebug_build_parts(fake_storep);
1730 2714
1731 init_all_queued(); 2715 ret = device_register(&pseudo_primary);
2716 if (ret < 0) {
2717 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
2718 ret);
2719 goto free_vm;
2720 }
2721 ret = bus_register(&pseudo_lld_bus);
2722 if (ret < 0) {
2723 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
2724 ret);
2725 goto dev_unreg;
2726 }
2727 ret = driver_register(&sdebug_driverfs_driver);
2728 if (ret < 0) {
2729 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
2730 ret);
2731 goto bus_unreg;
2732 }
2733 ret = do_create_driverfs_files();
2734 if (ret < 0) {
2735 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
2736 ret);
2737 goto del_files;
2738 }
1732 2739
1733 device_register(&pseudo_primary); 2740 init_all_queued();
1734 bus_register(&pseudo_lld_bus);
1735 driver_register(&sdebug_driverfs_driver);
1736 do_create_driverfs_files();
1737 2741
1738 sdebug_driver_template.proc_name = (char *)sdebug_proc_name; 2742 sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
1739 2743
@@ -1753,6 +2757,18 @@ static int __init scsi_debug_init(void)
1753 scsi_debug_add_host); 2757 scsi_debug_add_host);
1754 } 2758 }
1755 return 0; 2759 return 0;
2760
2761del_files:
2762 do_remove_driverfs_files();
2763 driver_unregister(&sdebug_driverfs_driver);
2764bus_unreg:
2765 bus_unregister(&pseudo_lld_bus);
2766dev_unreg:
2767 device_unregister(&pseudo_primary);
2768free_vm:
2769 vfree(fake_storep);
2770
2771 return ret;
1756} 2772}
1757 2773
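scsi_debug_init() now follows the standard kernel unwind idiom: each registration that succeeds gains a matching cleanup label, stacked in reverse order, so a failure at any step releases exactly what the earlier steps acquired. A minimal sketch with hypothetical names (register_a/b and friends are stand-ins):

	static int init_sketch(void)
	{
		int ret;

		ret = register_a();
		if (ret < 0)
			goto out;
		ret = register_b();
		if (ret < 0)
			goto unreg_a;	/* undo step 1 only */
		return 0;

	unreg_a:
		unregister_a();
	out:
		return ret;
	}
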
1758static void __exit scsi_debug_exit(void) 2774static void __exit scsi_debug_exit(void)
@@ -1813,7 +2829,7 @@ static int sdebug_add_adapter(void)
1813 struct sdebug_dev_info *sdbg_devinfo; 2829 struct sdebug_dev_info *sdbg_devinfo;
1814 struct list_head *lh, *lh_sf; 2830 struct list_head *lh, *lh_sf;
1815 2831
1816 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL); 2832 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
1817 2833
1818 if (NULL == sdbg_host) { 2834 if (NULL == sdbg_host) {
1819 printk(KERN_ERR "%s: out of memory at line %d\n", 2835 printk(KERN_ERR "%s: out of memory at line %d\n",
@@ -1825,7 +2841,7 @@ static int sdebug_add_adapter(void)
1825 2841
1826 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; 2842 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
1827 for (k = 0; k < devs_per_host; k++) { 2843 for (k = 0; k < devs_per_host; k++) {
1828 sdbg_devinfo = kzalloc(sizeof(*sdbg_devinfo), GFP_KERNEL); 2844 sdbg_devinfo = kzalloc(sizeof(*sdbg_devinfo),GFP_KERNEL);
1829 if (NULL == sdbg_devinfo) { 2845 if (NULL == sdbg_devinfo) {
1830 printk(KERN_ERR "%s: out of memory at line %d\n", 2846 printk(KERN_ERR "%s: out of memory at line %d\n",
1831 __FUNCTION__, __LINE__); 2847 __FUNCTION__, __LINE__);
@@ -1906,7 +2922,7 @@ static int sdebug_driver_probe(struct device * dev)
1906 hpnt->max_id = scsi_debug_num_tgts + 1; 2922 hpnt->max_id = scsi_debug_num_tgts + 1;
1907 else 2923 else
1908 hpnt->max_id = scsi_debug_num_tgts; 2924 hpnt->max_id = scsi_debug_num_tgts;
1909 hpnt->max_lun = scsi_debug_max_luns; 2925 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
1910 2926
1911 error = scsi_add_host(hpnt, &sdbg_host->dev); 2927 error = scsi_add_host(hpnt, &sdbg_host->dev);
1912 if (error) { 2928 if (error) {
@@ -1960,7 +2976,7 @@ static void sdebug_max_tgts_luns(void)
1960 hpnt->max_id = scsi_debug_num_tgts + 1; 2976 hpnt->max_id = scsi_debug_num_tgts + 1;
1961 else 2977 else
1962 hpnt->max_id = scsi_debug_num_tgts; 2978 hpnt->max_id = scsi_debug_num_tgts;
1963 hpnt->max_lun = scsi_debug_max_luns; 2979 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* scsi_debug_max_luns; */
1964 } 2980 }
1965 spin_unlock(&sdebug_host_list_lock); 2981 spin_unlock(&sdebug_host_list_lock);
1966} 2982}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index fb5cb4c9ac65..3d0429bc14ab 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -162,7 +162,7 @@ static struct {
162 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 162 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
163 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 163 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
164 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ 164 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
165 {"HP", "OPEN-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP XP Arrays */ 165 {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
166 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, 166 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
167 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 167 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
168 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 168 {"HP", "C1557A", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6a7a60fc0a4e..3d355d054612 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,19 +460,71 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
460 * Return value: 460 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 461 * SUCCESS or FAILED or NEEDS_RETRY
462 **/ 462 **/
463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
464 int cmnd_size, int timeout, int copy_sense)
464{ 465{
465 struct scsi_device *sdev = scmd->device; 466 struct scsi_device *sdev = scmd->device;
466 struct Scsi_Host *shost = sdev->host; 467 struct Scsi_Host *shost = sdev->host;
467 DECLARE_COMPLETION(done); 468 int old_result = scmd->result;
469 DECLARE_COMPLETION_ONSTACK(done);
468 unsigned long timeleft; 470 unsigned long timeleft;
469 unsigned long flags; 471 unsigned long flags;
472 unsigned char old_cmnd[MAX_COMMAND_SIZE];
473 enum dma_data_direction old_data_direction;
474 unsigned short old_use_sg;
475 unsigned char old_cmd_len;
476 unsigned old_bufflen;
477 void *old_buffer;
470 int rtn; 478 int rtn;
471 479
480 /*
481 * We need saved copies of a number of fields - this is because
482 * error handling may need to overwrite these with different values
483 * to run different commands, and once error handling is complete,
484 * we will need to restore these values prior to running the actual
485 * command.
486 */
487 old_buffer = scmd->request_buffer;
488 old_bufflen = scmd->request_bufflen;
489 memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));
490 old_data_direction = scmd->sc_data_direction;
491 old_cmd_len = scmd->cmd_len;
492 old_use_sg = scmd->use_sg;
493
494 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
495 memcpy(scmd->cmnd, cmnd, cmnd_size);
496
497 if (copy_sense) {
498 int gfp_mask = GFP_ATOMIC;
499
500 if (shost->hostt->unchecked_isa_dma)
501 gfp_mask |= __GFP_DMA;
502
503 scmd->sc_data_direction = DMA_FROM_DEVICE;
504 scmd->request_bufflen = 252;
505 scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
506 if (!scmd->request_buffer)
507 return FAILED;
508 } else {
509 scmd->request_buffer = NULL;
510 scmd->request_bufflen = 0;
511 scmd->sc_data_direction = DMA_NONE;
512 }
513
514 scmd->underflow = 0;
515 scmd->use_sg = 0;
516 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
517
472 if (sdev->scsi_level <= SCSI_2) 518 if (sdev->scsi_level <= SCSI_2)
473 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 519 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
474 (sdev->lun << 5 & 0xe0); 520 (sdev->lun << 5 & 0xe0);
475 521
522 /*
523 * Zero the sense buffer. The scsi spec mandates that any
524 * untransferred sense data should be interpreted as being zero.
525 */
526 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
527
476 shost->eh_action = &done; 528 shost->eh_action = &done;
477 529
478 spin_lock_irqsave(shost->host_lock, flags); 530 spin_lock_irqsave(shost->host_lock, flags);
@@ -522,6 +574,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
522 rtn = FAILED; 574 rtn = FAILED;
523 } 575 }
524 576
577
578 /*
579 * Last chance to have valid sense data.
580 */
581 if (copy_sense) {
582 if (!SCSI_SENSE_VALID(scmd)) {
583 memcpy(scmd->sense_buffer, scmd->request_buffer,
584 sizeof(scmd->sense_buffer));
585 }
586 kfree(scmd->request_buffer);
587 }
588
589
590 /*
591 * Restore original data
592 */
593 scmd->request_buffer = old_buffer;
594 scmd->request_bufflen = old_bufflen;
595 memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));
596 scmd->sc_data_direction = old_data_direction;
597 scmd->cmd_len = old_cmd_len;
598 scmd->use_sg = old_use_sg;
599 scmd->result = old_result;
525 return rtn; 600 return rtn;
526} 601}
527 602
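The rewritten scsi_send_eh_cmnd() centralises a pattern its callers below used to repeat: because error handling reuses the failed command's scsi_cmnd, every field it overwrites must be captured first and restored before the original command is retried or completed. The saved set, shown as a hypothetical struct for clarity (the function saves the fields individually):

	struct eh_saved_state {		/* illustrative only */
		void *request_buffer;
		unsigned request_bufflen;
		unsigned char cmnd[MAX_COMMAND_SIZE];
		enum dma_data_direction sc_data_direction;
		unsigned short use_sg;
		unsigned char cmd_len;
		int result;
	};
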
@@ -537,56 +612,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
537static int scsi_request_sense(struct scsi_cmnd *scmd) 612static int scsi_request_sense(struct scsi_cmnd *scmd)
538{ 613{
539 static unsigned char generic_sense[6] = 614 static unsigned char generic_sense[6] =
540 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 615 {REQUEST_SENSE, 0, 0, 0, 252, 0};
541 unsigned char *scsi_result;
542 int saved_result;
543 int rtn;
544
545 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
546
547 scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));
548 616
549 617 return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
550 if (unlikely(!scsi_result)) {
551 printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
552 __FUNCTION__);
553 return FAILED;
554 }
555
556 /*
557 * zero the sense buffer. some host adapters automatically always
558 * request sense, so it is not a good idea that
559 * scmd->request_buffer and scmd->sense_buffer point to the same
560 * address (db). 0 is not a valid sense code.
561 */
562 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
563 memset(scsi_result, 0, 252);
564
565 saved_result = scmd->result;
566 scmd->request_buffer = scsi_result;
567 scmd->request_bufflen = 252;
568 scmd->use_sg = 0;
569 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
570 scmd->sc_data_direction = DMA_FROM_DEVICE;
571 scmd->underflow = 0;
572
573 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
574
575 /* last chance to have valid sense data */
576 if(!SCSI_SENSE_VALID(scmd)) {
577 memcpy(scmd->sense_buffer, scmd->request_buffer,
578 sizeof(scmd->sense_buffer));
579 }
580
581 kfree(scsi_result);
582
583 /*
584 * when we eventually call scsi_finish, we really wish to complete
585 * the original request, so let's restore the original data. (db)
586 */
587 scsi_setup_cmd_retry(scmd);
588 scmd->result = saved_result;
589 return rtn;
590} 618}
591 619
592/** 620/**
@@ -605,12 +633,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
605{ 633{
606 scmd->device->host->host_failed--; 634 scmd->device->host->host_failed--;
607 scmd->eh_eflags = 0; 635 scmd->eh_eflags = 0;
608
609 /*
610 * set this back so that the upper level can correctly free up
611 * things.
612 */
613 scsi_setup_cmd_retry(scmd);
614 list_move_tail(&scmd->eh_entry, done_q); 636 list_move_tail(&scmd->eh_entry, done_q);
615} 637}
616EXPORT_SYMBOL(scsi_eh_finish_cmd); 638EXPORT_SYMBOL(scsi_eh_finish_cmd);
@@ -715,47 +737,23 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
715{ 737{
716 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 738 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
717 int retry_cnt = 1, rtn; 739 int retry_cnt = 1, rtn;
718 int saved_result;
719 740
720retry_tur: 741retry_tur:
721 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 742 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
722
723 /*
724 * zero the sense buffer. the scsi spec mandates that any
725 * untransferred sense data should be interpreted as being zero.
726 */
727 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
728 743
729 saved_result = scmd->result;
730 scmd->request_buffer = NULL;
731 scmd->request_bufflen = 0;
732 scmd->use_sg = 0;
733 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
734 scmd->underflow = 0;
735 scmd->sc_data_direction = DMA_NONE;
736
737 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
738
739 /*
740 * when we eventually call scsi_finish, we really wish to complete
741 * the original request, so let's restore the original data. (db)
742 */
743 scsi_setup_cmd_retry(scmd);
744 scmd->result = saved_result;
745
746 /*
747 * hey, we are done. let's look to see what happened.
748 */
749 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
750 __FUNCTION__, scmd, rtn)); 745 __FUNCTION__, scmd, rtn));
751 if (rtn == SUCCESS) 746
752 return 0; 747 switch (rtn) {
753 else if (rtn == NEEDS_RETRY) { 748 case NEEDS_RETRY:
754 if (retry_cnt--) 749 if (retry_cnt--)
755 goto retry_tur; 750 goto retry_tur;
751 /*FALLTHRU*/
752 case SUCCESS:
756 return 0; 753 return 0;
754 default:
755 return 1;
757 } 756 }
758 return 1;
759} 757}
760 758
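After the rewrite, scsi_eh_tur() retries the TEST UNIT READY once on NEEDS_RETRY and then lets that case fall through to SUCCESS, so both outcomes report 0 (device responded) and everything else reports 1. The switch is equivalent to:

	if (rtn == NEEDS_RETRY && retry_cnt--)
		goto retry_tur;
	return (rtn == SUCCESS || rtn == NEEDS_RETRY) ? 0 : 1;
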
761/** 759/**
@@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
837static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 835static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
838{ 836{
839 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 837 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
840 int rtn;
841 int saved_result;
842
843 if (!scmd->device->allow_restart)
844 return 1;
845 838
846 memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); 839 if (scmd->device->allow_restart) {
847 840 int rtn;
848 /*
849 * zero the sense buffer. the scsi spec mandates that any
850 * untransferred sense data should be interpreted as being zero.
851 */
852 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
853
854 saved_result = scmd->result;
855 scmd->request_buffer = NULL;
856 scmd->request_bufflen = 0;
857 scmd->use_sg = 0;
858 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
859 scmd->underflow = 0;
860 scmd->sc_data_direction = DMA_NONE;
861 841
862 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); 842 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
863 843 START_UNIT_TIMEOUT, 0);
864 /* 844 if (rtn == SUCCESS)
865 * when we eventually call scsi_finish, we really wish to complete 845 return 0;
866 * the original request, so let's restore the original data. (db) 846 }
867 */
868 scsi_setup_cmd_retry(scmd);
869 scmd->result = saved_result;
870 847
871 /*
872 * hey, we are done. let's look to see what happened.
873 */
874 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
875 __FUNCTION__, scmd, rtn));
876 if (rtn == SUCCESS)
877 return 0;
878 return 1; 848 return 1;
879} 849}
880 850
@@ -1672,7 +1642,9 @@ int
1672scsi_reset_provider(struct scsi_device *dev, int flag) 1642scsi_reset_provider(struct scsi_device *dev, int flag)
1673{ 1643{
1674 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); 1644 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
1645 struct Scsi_Host *shost = dev->host;
1675 struct request req; 1646 struct request req;
1647 unsigned long flags;
1676 int rtn; 1648 int rtn;
1677 1649
1678 scmd->request = &req; 1650 scmd->request = &req;
@@ -1682,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1682 1654
1683 scmd->scsi_done = scsi_reset_provider_done_command; 1655 scmd->scsi_done = scsi_reset_provider_done_command;
1684 scmd->done = NULL; 1656 scmd->done = NULL;
1685 scmd->buffer = NULL;
1686 scmd->bufflen = 0;
1687 scmd->request_buffer = NULL; 1657 scmd->request_buffer = NULL;
1688 scmd->request_bufflen = 0; 1658 scmd->request_bufflen = 0;
1689 1659
@@ -1699,6 +1669,10 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1699 */ 1669 */
1700 scmd->pid = 0; 1670 scmd->pid = 0;
1701 1671
1672 spin_lock_irqsave(shost->host_lock, flags);
1673 shost->tmf_in_progress = 1;
1674 spin_unlock_irqrestore(shost->host_lock, flags);
1675
1702 switch (flag) { 1676 switch (flag) {
1703 case SCSI_TRY_RESET_DEVICE: 1677 case SCSI_TRY_RESET_DEVICE:
1704 rtn = scsi_try_bus_device_reset(scmd); 1678 rtn = scsi_try_bus_device_reset(scmd);
@@ -1717,6 +1691,22 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1717 rtn = FAILED; 1691 rtn = FAILED;
1718 } 1692 }
1719 1693
1694 spin_lock_irqsave(shost->host_lock, flags);
1695 shost->tmf_in_progress = 0;
1696 spin_unlock_irqrestore(shost->host_lock, flags);
1697
1698 /*
1699 * be sure to wake up anyone who was sleeping or had their queue
1700 * suspended while we performed the TMF.
1701 */
1702 SCSI_LOG_ERROR_RECOVERY(3,
1703 printk("%s: waking up host to restart after TMF\n",
1704 __FUNCTION__));
1705
1706 wake_up(&shost->host_wait);
1707
1708 scsi_run_host_queues(shost);
1709
1720 scsi_next_command(scmd); 1710 scsi_next_command(scmd);
1721 return rtn; 1711 return rtn;
1722} 1712}
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
110 sshdr.asc, sshdr.ascq); 110 sshdr.asc, sshdr.ascq);
111 break; 111 break;
112 case NOT_READY: /* This happens if there is no disc in drive */ 112 case NOT_READY: /* This happens if there is no disc in drive */
113 if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { 113 if (sdev->removable)
114 printk(KERN_INFO "Device not ready. Make sure"
115 " there is a disc in the drive.\n");
116 break; 114 break;
117 }
118 case UNIT_ATTENTION: 115 case UNIT_ATTENTION:
119 if (sdev->removable) { 116 if (sdev->removable) {
120 sdev->changed = 1; 117 sdev->changed = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3d04a9f386ac..d6743b959a72 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
436 * 436 *
437 * Arguments: cmd - command that is ready to be queued. 437 * Arguments: cmd - command that is ready to be queued.
438 * 438 *
439 * Returns: Nothing
440 *
441 * Notes: This function has the job of initializing a number of 439 * Notes: This function has the job of initializing a number of
442 * fields related to error handling. Typically this will 440 * fields related to error handling. Typically this will
443 * be called once for each command, as required. 441 * be called once for each command, as required.
444 */ 442 */
445static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) 443static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
446{ 444{
447 cmd->serial_number = 0; 445 cmd->serial_number = 0;
448
449 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); 446 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
450
451 if (cmd->cmd_len == 0) 447 if (cmd->cmd_len == 0)
452 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
453
454 /*
455 * We need saved copies of a number of fields - this is because
456 * error handling may need to overwrite these with different values
457 * to run different commands, and once error handling is complete,
458 * we will need to restore these values prior to running the actual
459 * command.
460 */
461 cmd->old_use_sg = cmd->use_sg;
462 cmd->old_cmd_len = cmd->cmd_len;
463 cmd->sc_old_data_direction = cmd->sc_data_direction;
464 cmd->old_underflow = cmd->underflow;
465 memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
466 cmd->buffer = cmd->request_buffer;
467 cmd->bufflen = cmd->request_bufflen;
468
469 return 1;
470}
471
472/*
473 * Function: scsi_setup_cmd_retry()
474 *
475 * Purpose: Restore the command state for a retry
476 *
477 * Arguments: cmd - command to be restored
478 *
479 * Returns: Nothing
480 *
481 * Notes: Immediately prior to retrying a command, we need
482 * to restore certain fields that we saved above.
483 */
484void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
485{
486 memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
487 cmd->request_buffer = cmd->buffer;
488 cmd->request_bufflen = cmd->bufflen;
489 cmd->use_sg = cmd->old_use_sg;
490 cmd->cmd_len = cmd->old_cmd_len;
491 cmd->sc_data_direction = cmd->sc_old_data_direction;
492 cmd->underflow = cmd->old_underflow;
493} 449}
494 450
495void scsi_device_unbusy(struct scsi_device *sdev) 451void scsi_device_unbusy(struct scsi_device *sdev)
@@ -595,7 +551,15 @@ static void scsi_run_queue(struct request_queue *q)
595 list_del_init(&sdev->starved_entry); 551 list_del_init(&sdev->starved_entry);
596 spin_unlock_irqrestore(shost->host_lock, flags); 552 spin_unlock_irqrestore(shost->host_lock, flags);
597 553
598 blk_run_queue(sdev->request_queue); 554
555 if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
556 !test_and_set_bit(QUEUE_FLAG_REENTER,
557 &sdev->request_queue->queue_flags)) {
558 blk_run_queue(sdev->request_queue);
559 clear_bit(QUEUE_FLAG_REENTER,
560 &sdev->request_queue->queue_flags);
561 } else
562 blk_run_queue(sdev->request_queue);
599 563
600 spin_lock_irqsave(shost->host_lock, flags); 564 spin_lock_irqsave(shost->host_lock, flags);
601 if (unlikely(!list_empty(&sdev->starved_entry))) 565 if (unlikely(!list_empty(&sdev->starved_entry)))
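The guard above builds on test_and_set_bit(), which atomically sets a bit and returns its previous value: when scsi_run_queue() is itself running from a queue flagged QUEUE_FLAG_REENTER, a starved device's queue is only run directly if its own REENTER bit can be claimed first, which prevents unbounded recursion back into the queue-running path. The primitive's generic shape (BIT, word and do_work are stand-ins):

	if (!test_and_set_bit(BIT, &word)) {	/* we claimed the bit */
		do_work();
		clear_bit(BIT, &word);
	}
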
@@ -807,22 +771,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
807 */ 771 */
808static void scsi_release_buffers(struct scsi_cmnd *cmd) 772static void scsi_release_buffers(struct scsi_cmnd *cmd)
809{ 773{
810 struct request *req = cmd->request;
811
812 /*
813 * Free up any indirection buffers we allocated for DMA purposes.
814 */
815 if (cmd->use_sg) 774 if (cmd->use_sg)
816 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 775 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
817 else if (cmd->request_buffer != req->buffer)
818 kfree(cmd->request_buffer);
819 776
820 /* 777 /*
821 * Zero these out. They now point to freed memory, and it is 778 * Zero these out. They now point to freed memory, and it is
822 * dangerous to hang onto the pointers. 779 * dangerous to hang onto the pointers.
823 */ 780 */
824 cmd->buffer = NULL;
825 cmd->bufflen = 0;
826 cmd->request_buffer = NULL; 781 cmd->request_buffer = NULL;
827 cmd->request_bufflen = 0; 782 cmd->request_bufflen = 0;
828} 783}
@@ -855,11 +810,10 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
855 * b) We can just use scsi_requeue_command() here. This would 810 * b) We can just use scsi_requeue_command() here. This would
856 * be used if we just wanted to retry, for example. 811 * be used if we just wanted to retry, for example.
857 */ 812 */
858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, 813void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859 unsigned int block_bytes)
860{ 814{
861 int result = cmd->result; 815 int result = cmd->result;
862 int this_count = cmd->bufflen; 816 int this_count = cmd->request_bufflen;
863 request_queue_t *q = cmd->device->request_queue; 817 request_queue_t *q = cmd->device->request_queue;
864 struct request *req = cmd->request; 818 struct request *req = cmd->request;
865 int clear_errors = 1; 819 int clear_errors = 1;
@@ -867,28 +821,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
867 int sense_valid = 0; 821 int sense_valid = 0;
868 int sense_deferred = 0; 822 int sense_deferred = 0;
869 823
870 /* 824 scsi_release_buffers(cmd);
871 * Free up any indirection buffers we allocated for DMA purposes.
872 * For the case of a READ, we need to copy the data out of the
873 * bounce buffer and into the real buffer.
874 */
875 if (cmd->use_sg)
876 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
877 else if (cmd->buffer != req->buffer) {
878 if (rq_data_dir(req) == READ) {
879 unsigned long flags;
880 char *to = bio_kmap_irq(req->bio, &flags);
881 memcpy(to, cmd->buffer, cmd->bufflen);
882 bio_kunmap_irq(to, &flags);
883 }
884 kfree(cmd->buffer);
885 }
886 825
887 if (result) { 826 if (result) {
888 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 827 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
889 if (sense_valid) 828 if (sense_valid)
890 sense_deferred = scsi_sense_is_deferred(&sshdr); 829 sense_deferred = scsi_sense_is_deferred(&sshdr);
891 } 830 }
831
892 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 832 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
893 req->errors = result; 833 req->errors = result;
894 if (result) { 834 if (result) {
@@ -909,99 +849,73 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
909 } 849 }
910 850
911 /* 851 /*
912 * Zero these out. They now point to freed memory, and it is
913 * dangerous to hang onto the pointers.
914 */
915 cmd->buffer = NULL;
916 cmd->bufflen = 0;
917 cmd->request_buffer = NULL;
918 cmd->request_bufflen = 0;
919
920 /*
921 * Next deal with any sectors which we were able to correctly 852 * Next deal with any sectors which we were able to correctly
922 * handle. 853 * handle.
923 */ 854 */
924 if (good_bytes >= 0) { 855 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
925 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n", 856 "%d bytes done.\n",
926 req->nr_sectors, good_bytes)); 857 req->nr_sectors, good_bytes));
927 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg)); 858 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
928 859
929 if (clear_errors) 860 if (clear_errors)
930 req->errors = 0; 861 req->errors = 0;
931 /*
932 * If multiple sectors are requested in one buffer, then
933 * they will have been finished off by the first command.
934 * If not, then we have a multi-buffer command.
935 *
936 * If block_bytes != 0, it means we had a medium error
937 * of some sort, and that we want to mark some number of
938 * sectors as not uptodate. Thus we want to inhibit
939 * requeueing right here - we will requeue down below
940 * when we handle the bad sectors.
941 */
942 862
943 /* 863 /* A number of bytes were successfully read. If there
944 * If the command completed without error, then either 864 * are leftovers and there is some kind of error
945 * finish off the rest of the command, or start a new one. 865 * (result != 0), retry the rest.
946 */ 866 */
947 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) 867 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
948 return; 868 return;
949 } 869
950 /* 870 /* good_bytes = 0, or (inclusive) there were leftovers and
951 * Now, if we were good little boys and girls, Santa left us a request 871 * result = 0, so scsi_end_request couldn't retry.
952 * sense buffer. We can extract information from this, so we
953 * can choose a block to remap, etc.
954 */ 872 */
955 if (sense_valid && !sense_deferred) { 873 if (sense_valid && !sense_deferred) {
956 switch (sshdr.sense_key) { 874 switch (sshdr.sense_key) {
957 case UNIT_ATTENTION: 875 case UNIT_ATTENTION:
958 if (cmd->device->removable) { 876 if (cmd->device->removable) {
959 /* detected disc change. set a bit 877 /* Detected disc change. Set a bit
960 * and quietly refuse further access. 878 * and quietly refuse further access.
961 */ 879 */
962 cmd->device->changed = 1; 880 cmd->device->changed = 1;
963 scsi_end_request(cmd, 0, 881 scsi_end_request(cmd, 0, this_count, 1);
964 this_count, 1);
965 return; 882 return;
966 } else { 883 } else {
967 /* 884 /* Must have been a power glitch, or a
968 * Must have been a power glitch, or a 885 * bus reset. Could not have been a
969 * bus reset. Could not have been a 886 * media change, so we just retry the
970 * media change, so we just retry the 887 * request and see what happens.
971 * request and see what happens. 888 */
972 */
973 scsi_requeue_command(q, cmd); 889 scsi_requeue_command(q, cmd);
974 return; 890 return;
975 } 891 }
976 break; 892 break;
977 case ILLEGAL_REQUEST: 893 case ILLEGAL_REQUEST:
978 /* 894 /* If we had an ILLEGAL REQUEST returned, then
979 * If we had an ILLEGAL REQUEST returned, then we may 895 * we may have performed an unsupported
980 * have performed an unsupported command. The only 896 * command. The only thing this should be
981 * thing this should be would be a ten byte read where 897 * would be a ten byte read where only a six
982 * only a six byte read was supported. Also, on a 898 * byte read was supported. Also, on a system
983 * system where READ CAPACITY failed, we may have read 899 * where READ CAPACITY failed, we may have
984 * past the end of the disk. 900 * read past the end of the disk.
985 */ 901 */
986 if ((cmd->device->use_10_for_rw && 902 if ((cmd->device->use_10_for_rw &&
987 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 903 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
988 (cmd->cmnd[0] == READ_10 || 904 (cmd->cmnd[0] == READ_10 ||
989 cmd->cmnd[0] == WRITE_10)) { 905 cmd->cmnd[0] == WRITE_10)) {
990 cmd->device->use_10_for_rw = 0; 906 cmd->device->use_10_for_rw = 0;
991 /* 907 /* This will cause a retry with a
992 * This will cause a retry with a 6-byte 908 * 6-byte command.
993 * command.
994 */ 909 */
995 scsi_requeue_command(q, cmd); 910 scsi_requeue_command(q, cmd);
996 result = 0; 911 return;
997 } else { 912 } else {
998 scsi_end_request(cmd, 0, this_count, 1); 913 scsi_end_request(cmd, 0, this_count, 1);
999 return; 914 return;
1000 } 915 }
1001 break; 916 break;
1002 case NOT_READY: 917 case NOT_READY:
1003 /* 918 /* If the device is in the process of becoming
1004 * If the device is in the process of becoming
1005 * ready, or has a temporary blockage, retry. 919 * ready, or has a temporary blockage, retry.
1006 */ 920 */
1007 if (sshdr.asc == 0x04) { 921 if (sshdr.asc == 0x04) {
@@ -1021,7 +935,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1021 } 935 }
1022 if (!(req->flags & REQ_QUIET)) { 936 if (!(req->flags & REQ_QUIET)) {
1023 scmd_printk(KERN_INFO, cmd, 937 scmd_printk(KERN_INFO, cmd,
1024 "Device not ready: "); 938 "Device not ready: ");
1025 scsi_print_sense_hdr("", &sshdr); 939 scsi_print_sense_hdr("", &sshdr);
1026 } 940 }
1027 scsi_end_request(cmd, 0, this_count, 1); 941 scsi_end_request(cmd, 0, this_count, 1);
@@ -1029,21 +943,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1029 case VOLUME_OVERFLOW: 943 case VOLUME_OVERFLOW:
1030 if (!(req->flags & REQ_QUIET)) { 944 if (!(req->flags & REQ_QUIET)) {
1031 scmd_printk(KERN_INFO, cmd, 945 scmd_printk(KERN_INFO, cmd,
1032 "Volume overflow, CDB: "); 946 "Volume overflow, CDB: ");
1033 __scsi_print_command(cmd->data_cmnd); 947 __scsi_print_command(cmd->cmnd);
1034 scsi_print_sense("", cmd); 948 scsi_print_sense("", cmd);
1035 } 949 }
1036 scsi_end_request(cmd, 0, block_bytes, 1); 950 /* See SSC3rXX or current. */
951 scsi_end_request(cmd, 0, this_count, 1);
1037 return; 952 return;
1038 default: 953 default:
1039 break; 954 break;
1040 } 955 }
1041 } /* driver byte != 0 */ 956 }
1042 if (host_byte(result) == DID_RESET) { 957 if (host_byte(result) == DID_RESET) {
1043 /* 958 /* Third party bus reset or reset for error recovery
1044 * Third party bus reset or reset for error 959 * reasons. Just retry the request and see what
1045 * recovery reasons. Just retry the request 960 * happens.
1046 * and see what happens.
1047 */ 961 */
1048 scsi_requeue_command(q, cmd); 962 scsi_requeue_command(q, cmd);
1049 return; 963 return;
@@ -1051,21 +965,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1051 if (result) { 965 if (result) {
1052 if (!(req->flags & REQ_QUIET)) { 966 if (!(req->flags & REQ_QUIET)) {
1053 scmd_printk(KERN_INFO, cmd, 967 scmd_printk(KERN_INFO, cmd,
1054 "SCSI error: return code = 0x%x\n", result); 968 "SCSI error: return code = 0x%08x\n",
1055 969 result);
1056 if (driver_byte(result) & DRIVER_SENSE) 970 if (driver_byte(result) & DRIVER_SENSE)
1057 scsi_print_sense("", cmd); 971 scsi_print_sense("", cmd);
1058 } 972 }
1059 /*
1060 * Mark a single buffer as not uptodate. Queue the remainder.
1061 * We sometimes get this cruft in the event that a medium error
1062 * isn't properly reported.
1063 */
1064 block_bytes = req->hard_cur_sectors << 9;
1065 if (!block_bytes)
1066 block_bytes = req->data_len;
1067 scsi_end_request(cmd, 0, block_bytes, 1);
1068 } 973 }
974 scsi_end_request(cmd, 0, this_count, !result);
1069} 975}
1070EXPORT_SYMBOL(scsi_io_completion); 976EXPORT_SYMBOL(scsi_io_completion);
1071 977
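
The rewritten scsi_io_completion() above keys its recovery policy off the sense data: UNIT ATTENTION on removable media marks the device changed and fails the request, otherwise it requeues; ILLEGAL REQUEST with ASC/ASCQ 0x20/0x00 drops the device to 6-byte READ/WRITE CDBs and requeues; NOT READY with ASC 0x04 retries. Below is a minimal standalone C sketch of that dispatch shape only; the sense-key values are the standard SCSI codes, but the action enum and helper are illustrative, not the kernel's API.

#include <stdio.h>

enum action { ACT_FAIL, ACT_REQUEUE, ACT_RETRY };

struct sense { unsigned char key, asc, ascq; };

static enum action dispatch(const struct sense *s, int removable,
			    int *use_10_for_rw)
{
	switch (s->key) {
	case 0x06:			/* UNIT ATTENTION */
		return removable ? ACT_FAIL : ACT_REQUEUE;
	case 0x05:			/* ILLEGAL REQUEST */
		if (*use_10_for_rw && s->asc == 0x20 && s->ascq == 0x00) {
			*use_10_for_rw = 0;	/* fall back to 6-byte CDBs */
			return ACT_REQUEUE;
		}
		return ACT_FAIL;
	case 0x02:			/* NOT READY */
		return s->asc == 0x04 ? ACT_RETRY : ACT_FAIL;
	default:
		return ACT_FAIL;
	}
}

int main(void)
{
	struct sense s = { 0x05, 0x20, 0x00 };	/* invalid opcode */
	int use10 = 1;

	printf("action=%d use_10_for_rw=%d\n", dispatch(&s, 0, &use10), use10);
	return 0;
}
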
@@ -1169,7 +1075,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1169 * successfully. Since this is a REQ_BLOCK_PC command the 1075 * successfully. Since this is a REQ_BLOCK_PC command the
1170 * caller should check the request's errors value 1076 * caller should check the request's errors value
1171 */ 1077 */
1172 scsi_io_completion(cmd, cmd->bufflen, 0); 1078 scsi_io_completion(cmd, cmd->request_bufflen);
1173} 1079}
1174 1080
1175static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1081static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
@@ -2050,6 +1956,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2050 switch (oldstate) { 1956 switch (oldstate) {
2051 case SDEV_CREATED: 1957 case SDEV_CREATED:
2052 case SDEV_RUNNING: 1958 case SDEV_RUNNING:
1959 case SDEV_QUIESCE:
2053 case SDEV_OFFLINE: 1960 case SDEV_OFFLINE:
2054 case SDEV_BLOCK: 1961 case SDEV_BLOCK:
2055 break; 1962 break;
@@ -2060,6 +1967,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2060 1967
2061 case SDEV_DEL: 1968 case SDEV_DEL:
2062 switch (oldstate) { 1969 switch (oldstate) {
1970 case SDEV_CREATED:
1971 case SDEV_RUNNING:
1972 case SDEV_OFFLINE:
2063 case SDEV_CANCEL: 1973 case SDEV_CANCEL:
2064 break; 1974 break;
2065 default: 1975 default:
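
The scsi_device_set_state() hunks above tighten the device state machine: SDEV_QUIESCE becomes a legal predecessor of SDEV_CANCEL, and SDEV_CREATED/SDEV_RUNNING/SDEV_OFFLINE become legal predecessors of SDEV_DEL. A small standalone sketch of the same switch-on-old-state pattern, modeling only the two targets shown in the hunks (state names reused for illustration, not the kernel code):

#include <stdio.h>

enum sdev_state { SDEV_CREATED, SDEV_RUNNING, SDEV_CANCEL, SDEV_DEL,
		  SDEV_QUIESCE, SDEV_OFFLINE, SDEV_BLOCK };

/* Return 0 if oldstate may transition to newstate, -1 otherwise. */
static int sdev_transition_ok(enum sdev_state oldstate, enum sdev_state newstate)
{
	switch (newstate) {
	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED: case SDEV_RUNNING: case SDEV_QUIESCE:
		case SDEV_OFFLINE: case SDEV_BLOCK:
			return 0;
		default:
			return -1;
		}
	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED: case SDEV_RUNNING: case SDEV_OFFLINE:
		case SDEV_CANCEL:
			return 0;
		default:
			return -1;
		}
	default:
		return 0;	/* other targets not modeled in this sketch */
	}
}

int main(void)
{
	printf("QUIESCE -> CANCEL: %s\n",
	       sdev_transition_ok(SDEV_QUIESCE, SDEV_CANCEL) ? "rejected" : "ok");
	printf("BLOCK   -> DEL:    %s\n",
	       sdev_transition_ok(SDEV_BLOCK, SDEV_DEL) ? "rejected" : "ok");
	return 0;
}
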
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index a3e2af6a846c..1f65139e14f8 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -1,7 +1,6 @@
1#ifndef _SCSI_LOGGING_H 1#ifndef _SCSI_LOGGING_H
2#define _SCSI_LOGGING_H 2#define _SCSI_LOGGING_H
3 3
4#include <linux/config.h>
5 4
6/* 5/*
7 * This defines the scsi logging feature. It is a means by which the user 6 * This defines the scsi logging feature. It is a means by which the user
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
new file mode 100644
index 000000000000..1b59b27e887f
--- /dev/null
+++ b/drivers/scsi/scsi_netlink.c
@@ -0,0 +1,199 @@
1/*
2 * scsi_netlink.c - SCSI Transport Netlink Interface
3 *
4 * Copyright (C) 2006 James Smart, Emulex Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <linux/time.h>
22#include <linux/jiffies.h>
23#include <linux/security.h>
24#include <net/sock.h>
25#include <net/netlink.h>
26
27#include <scsi/scsi_netlink.h>
28#include "scsi_priv.h"
29
30struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock);
32
33
34/**
35 * scsi_nl_rcv_msg -
36 * Receive message handler. Extracts messages from the receive buffer,
37 * validates the message header, and calls the appropriate transport message handler.
38 *
39 * @skb: socket receive buffer
40 *
41 **/
42static void
43scsi_nl_rcv_msg(struct sk_buff *skb)
44{
45 struct nlmsghdr *nlh;
46 struct scsi_nl_hdr *hdr;
47 uint32_t rlen;
48 int err;
49
50 while (skb->len >= NLMSG_SPACE(0)) {
51 err = 0;
52
53 nlh = (struct nlmsghdr *) skb->data;
54 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
55 (skb->len < nlh->nlmsg_len)) {
56 printk(KERN_WARNING "%s: discarding partial skb\n",
57 __FUNCTION__);
58 return;
59 }
60
61 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
62 if (rlen > skb->len)
63 rlen = skb->len;
64
65 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
66 err = -EBADMSG;
67 goto next_msg;
68 }
69
70 hdr = NLMSG_DATA(nlh);
71 if ((hdr->version != SCSI_NL_VERSION) ||
72 (hdr->magic != SCSI_NL_MAGIC)) {
73 err = -EPROTOTYPE;
74 goto next_msg;
75 }
76
77 if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
78 err = -EPERM;
79 goto next_msg;
80 }
81
82 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
83 printk(KERN_WARNING "%s: discarding partial message\n",
84 __FUNCTION__);
85 return;
86 }
87
88 /*
89 * We currently don't support anyone sending us a message
90 */
91
92next_msg:
93 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
94 netlink_ack(skb, nlh, err);
95
96 skb_pull(skb, rlen);
97 }
98}
99
100
101/**
102 * scsi_nl_rcv -
103 * Receive handler for a socket. Extracts a received message buffer from
104 * the socket, and starts message processing.
105 *
106 * @sk: socket
107 * @len: unused
108 *
109 **/
110static void
111scsi_nl_rcv(struct sock *sk, int len)
112{
113 struct sk_buff *skb;
114
115 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
116 scsi_nl_rcv_msg(skb);
117 kfree_skb(skb);
118 }
119}
120
121
122/**
123 * scsi_nl_rcv_event -
124 * Event handler for a netlink socket.
125 *
126 * @this: event notifier block
127 * @event: event type
128 * @ptr: event payload
129 *
130 **/
131static int
132scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
133{
134 struct netlink_notify *n = ptr;
135
136 if (n->protocol != NETLINK_SCSITRANSPORT)
137 return NOTIFY_DONE;
138
139 /*
140 * Currently, we are not tracking PIDs, etc. There is nothing
141 * to handle.
142 */
143
144 return NOTIFY_DONE;
145}
146
147static struct notifier_block scsi_netlink_notifier = {
148 .notifier_call = scsi_nl_rcv_event,
149};
150
151
152/**
153 * scsi_netlink_init -
154 * Called by SCSI subsystem to initialize the SCSI transport netlink
155 * interface
156 *
157 **/
158void
159scsi_netlink_init(void)
160{
161 int error;
162
163 error = netlink_register_notifier(&scsi_netlink_notifier);
164 if (error) {
165 printk(KERN_ERR "%s: register of event handler failed - %d\n",
166 __FUNCTION__, error);
167 return;
168 }
169
170 scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT,
171 SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE);
172 if (!scsi_nl_sock) {
173 printk(KERN_ERR "%s: register of receive handler failed\n",
174 __FUNCTION__);
175 netlink_unregister_notifier(&scsi_netlink_notifier);
176 }
177
178 return;
179}
180
181
182/**
183 * scsi_netlink_exit -
184 * Called by SCSI subsystem to disable the SCSI transport netlink
185 * interface
186 *
187 **/
188void
189scsi_netlink_exit(void)
190{
191 if (scsi_nl_sock) {
192 sock_release(scsi_nl_sock->sk_socket);
193 netlink_unregister_notifier(&scsi_netlink_notifier);
194 }
195
196 return;
197}
198
199
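
Note that the kernel side above only multicasts events; inbound messages are validated and then dropped. The natural counterpart is a userspace listener. A hedged sketch follows; it assumes the NETLINK_SCSITRANSPORT protocol value from linux/netlink.h and assumes the FC event group is the first multicast group, so check scsi_netlink.h on your tree before relying on either:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_SCSITRANSPORT
#define NETLINK_SCSITRANSPORT 18	/* assumption: value in linux/netlink.h */
#endif

int main(void)
{
	struct sockaddr_nl sa;
	char buf[4096];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SCSITRANSPORT);
	if (fd < 0) { perror("socket"); return 1; }

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = 1;	/* assumption: SCSI_NL_GRP_FC_EVENTS is group 1 */
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("bind");
		return 1;
	}

	/* Walk each datagram with the standard NLMSG_* iteration macros. */
	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
			printf("netlink msg type %u, %u bytes\n",
			       nlh->nlmsg_type, nlh->nlmsg_len);
	}
	close(fd);
	return 0;
}
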
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a1727a0e1bdd..5d023d44e5e7 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -1,7 +1,6 @@
1#ifndef _SCSI_PRIV_H 1#ifndef _SCSI_PRIV_H
2#define _SCSI_PRIV_H 2#define _SCSI_PRIV_H
3 3
4#include <linux/config.h>
5#include <linux/device.h> 4#include <linux/device.h>
6 5
7struct request_queue; 6struct request_queue;
@@ -9,6 +8,7 @@ struct scsi_cmnd;
9struct scsi_device; 8struct scsi_device;
10struct scsi_host_template; 9struct scsi_host_template;
11struct Scsi_Host; 10struct Scsi_Host;
11struct scsi_nl_hdr;
12 12
13 13
14/* 14/*
@@ -58,7 +58,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
58 58
59/* scsi_lib.c */ 59/* scsi_lib.c */
60extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 60extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
61extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);
62extern void scsi_device_unbusy(struct scsi_device *sdev); 61extern void scsi_device_unbusy(struct scsi_device *sdev);
63extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 62extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
64extern void scsi_next_command(struct scsi_cmnd *cmd); 63extern void scsi_next_command(struct scsi_cmnd *cmd);
@@ -112,12 +111,22 @@ extern void __scsi_remove_device(struct scsi_device *);
112 111
113extern struct bus_type scsi_bus_type; 112extern struct bus_type scsi_bus_type;
114 113
114/* scsi_netlink.c */
115#ifdef CONFIG_SCSI_NETLINK
116extern void scsi_netlink_init(void);
117extern void scsi_netlink_exit(void);
118extern struct sock *scsi_nl_sock;
119#else
120static inline void scsi_netlink_init(void) {}
121static inline void scsi_netlink_exit(void) {}
122#endif
123
115/* 124/*
116 * internal scsi timeout functions: for use by mid-layer and transport 125 * internal scsi timeout functions: for use by mid-layer and transport
117 * classes. 126 * classes.
118 */ 127 */
119 128
120#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT (HZ*60) 129#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
121extern int scsi_internal_device_block(struct scsi_device *sdev); 130extern int scsi_internal_device_block(struct scsi_device *sdev);
122extern int scsi_internal_device_unblock(struct scsi_device *sdev); 131extern int scsi_internal_device_unblock(struct scsi_device *sdev);
123 132
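
Two idioms worth noting in the scsi_priv.h hunks: SCSI_DEVICE_BLOCK_MAX_TIMEOUT is now expressed in seconds rather than jiffies, so callers must scale by HZ at the point of use, and the netlink entry points get empty static-inline stubs when CONFIG_SCSI_NETLINK is off. A minimal userspace sketch of both, with an illustrative HZ value standing in for the kernel's:

#include <stdio.h>

#define HZ 250					/* illustrative tick rate */
#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600	/* seconds, as in the new header */

#ifdef CONFIG_SCSI_NETLINK
extern void scsi_netlink_init(void);
extern void scsi_netlink_exit(void);
#else
static inline void scsi_netlink_init(void) {}	/* compiles away when off */
static inline void scsi_netlink_exit(void) {}
#endif

int main(void)
{
	/* Callers now convert at the use site, e.g. timeout * HZ. */
	unsigned long max_jiffies = SCSI_DEVICE_BLOCK_MAX_TIMEOUT * HZ;

	scsi_netlink_init();
	printf("block timeout: %lu jiffies (%d s at HZ=%d)\n",
	       max_jiffies, SCSI_DEVICE_BLOCK_MAX_TIMEOUT, HZ);
	scsi_netlink_exit();
	return 0;
}
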
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 55200e4fdf11..524a5f7a5193 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -178,9 +178,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
178 178
179 seq_printf(s, "\n"); 179 seq_printf(s, "\n");
180 180
181 seq_printf(s, " Type: %s ", 181 seq_printf(s, " Type: %s ", scsi_device_type(sdev->type));
182 sdev->type < MAX_SCSI_DEVICE_CODE ?
183 scsi_device_types[(int) sdev->type] : "Unknown ");
184 seq_printf(s, " ANSI" 182 seq_printf(s, " ANSI"
185 " SCSI revision: %02x", (sdev->scsi_level - 1) ? 183 " SCSI revision: %02x", (sdev->scsi_level - 1) ?
186 sdev->scsi_level - 1 : 1); 184 sdev->scsi_level - 1 : 1);
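
The scsi_proc.c hunk replaces the open-coded scsi_device_types[] lookup with the shared scsi_device_type() helper, centralizing the bounds check instead of repeating it at every call site. A guess at the helper's contract, with an abbreviated table (the real table and helper live elsewhere in the SCSI core, not in this hunk):

#include <stdio.h>

static const char *scsi_device_types_tbl[] = {
	"Direct-Access    ", "Sequential-Access", "Printer          ",
};
#define MAX_SCSI_DEVICE_CODE \
	(sizeof(scsi_device_types_tbl) / sizeof(scsi_device_types_tbl[0]))

/* Centralized lookup: out-of-range codes fall back to "Unknown". */
static const char *scsi_device_type(int type)
{
	if (type < 0 || type >= (int)MAX_SCSI_DEVICE_CODE)
		return "Unknown          ";
	return scsi_device_types_tbl[type];
}

int main(void)
{
	printf("type 0:  %s\n", scsi_device_type(0));
	printf("type 31: %s\n", scsi_device_type(31));
	return 0;
}
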
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index d76e6e3d8ca5..e1edab45a37b 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -2,7 +2,8 @@
2#define _SCSI_SAS_INTERNAL_H 2#define _SCSI_SAS_INTERNAL_H
3 3
4#define SAS_HOST_ATTRS 0 4#define SAS_HOST_ATTRS 0
5#define SAS_PORT_ATTRS 17 5#define SAS_PHY_ATTRS 17
6#define SAS_PORT_ATTRS 1
6#define SAS_RPORT_ATTRS 7 7#define SAS_RPORT_ATTRS 7
7#define SAS_END_DEV_ATTRS 3 8#define SAS_END_DEV_ATTRS 3
8#define SAS_EXPANDER_ATTRS 7 9#define SAS_EXPANDER_ATTRS 7
@@ -13,12 +14,14 @@ struct sas_internal {
13 struct sas_domain_function_template *dft; 14 struct sas_domain_function_template *dft;
14 15
15 struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS]; 16 struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
16 struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS]; 17 struct class_device_attribute private_phy_attrs[SAS_PHY_ATTRS];
18 struct class_device_attribute private_port_attrs[SAS_PORT_ATTRS];
17 struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS]; 19 struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
18 struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS]; 20 struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
19 struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS]; 21 struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
20 22
21 struct transport_container phy_attr_cont; 23 struct transport_container phy_attr_cont;
24 struct transport_container port_attr_cont;
22 struct transport_container rphy_attr_cont; 25 struct transport_container rphy_attr_cont;
23 struct transport_container end_dev_attr_cont; 26 struct transport_container end_dev_attr_cont;
24 struct transport_container expander_attr_cont; 27 struct transport_container expander_attr_cont;
@@ -28,7 +31,8 @@ struct sas_internal {
28 * needed by scsi_sysfs.c 31 * needed by scsi_sysfs.c
29 */ 32 */
30 struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1]; 33 struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
31 struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1]; 34 struct class_device_attribute *phy_attrs[SAS_PHY_ATTRS + 1];
35 struct class_device_attribute *port_attrs[SAS_PORT_ATTRS + 1];
32 struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1]; 36 struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
33 struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1]; 37 struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
34 struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1]; 38 struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
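
The SAS header now sizes phy and port attribute tables separately, and each pointer array carries a "+ 1" slot so it can be NULL-terminated, letting consumers walk the list without a separate count. A small sketch of that idiom, with the attribute type simplified to a name string and "num_phys" used only as a plausible example name:

#include <stdio.h>

#define SAS_PHY_ATTRS  17
#define SAS_PORT_ATTRS  1

struct attr { const char *name; };

static struct attr private_port_attrs[SAS_PORT_ATTRS] = { { "num_phys" } };
static struct attr *port_attrs[SAS_PORT_ATTRS + 1];	/* +1 for NULL */

int main(void)
{
	int count = 0;

	/* Mirror of the SETUP_*_ATTRIBUTE pattern: fill, then terminate. */
	port_attrs[count] = &private_port_attrs[count];
	count++;
	port_attrs[count] = NULL;

	for (struct attr **a = port_attrs; *a; a++)
		printf("port attr: %s\n", (*a)->name);
	return 0;
}
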
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0f7e6f94d66b..fd9e281c3bfe 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -25,7 +25,6 @@
25 * or a LUN is seen that cannot have a device attached to it. 25 * or a LUN is seen that cannot have a device attached to it.
26 */ 26 */
27 27
28#include <linux/config.h>
29#include <linux/module.h> 28#include <linux/module.h>
30#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
31#include <linux/init.h> 30#include <linux/init.h>
@@ -135,59 +134,6 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
135} 134}
136 135
137/** 136/**
138 * print_inquiry - printk the inquiry information
139 * @inq_result: printk this SCSI INQUIRY
140 *
141 * Description:
142 * printk the vendor, model, and other information found in the
143 * INQUIRY data in @inq_result.
144 *
145 * Notes:
146 * Remove this, and replace with a hotplug event that logs any
147 * relevant information.
148 **/
149static void print_inquiry(unsigned char *inq_result)
150{
151 int i;
152
153 printk(KERN_NOTICE " Vendor: ");
154 for (i = 8; i < 16; i++)
155 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
156 printk("%c", inq_result[i]);
157 else
158 printk(" ");
159
160 printk(" Model: ");
161 for (i = 16; i < 32; i++)
162 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
163 printk("%c", inq_result[i]);
164 else
165 printk(" ");
166
167 printk(" Rev: ");
168 for (i = 32; i < 36; i++)
169 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
170 printk("%c", inq_result[i]);
171 else
172 printk(" ");
173
174 printk("\n");
175
176 i = inq_result[0] & 0x1f;
177
178 printk(KERN_NOTICE " Type: %s ",
179 i <
180 MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
181 "Unknown ");
182 printk(" ANSI SCSI revision: %02x",
183 inq_result[2] & 0x07);
184 if ((inq_result[2] & 0x07) == 1 && (inq_result[3] & 0x0f) == 1)
185 printk(" CCS\n");
186 else
187 printk("\n");
188}
189
190/**
191 * scsi_alloc_sdev - allocate and setup a scsi_Device 137 * scsi_alloc_sdev - allocate and setup a scsi_Device
192 * 138 *
193 * Description: 139 * Description:
@@ -320,6 +266,18 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
320 return found_starget; 266 return found_starget;
321} 267}
322 268
269/**
270 * scsi_alloc_target - allocate a new or find an existing target
271 * @parent: parent of the target (need not be a scsi host)
272 * @channel: target channel number (zero if no channels)
273 * @id: target id number
274 *
275 * Return an existing target if one exists, provided it hasn't already
276 * gone into STARGET_DEL state, otherwise allocate a new target.
277 *
278 * The target is returned with an incremented reference, so the caller
279 * is responsible for both reaping and doing a last put
280 */
323static struct scsi_target *scsi_alloc_target(struct device *parent, 281static struct scsi_target *scsi_alloc_target(struct device *parent,
324 int channel, uint id) 282 int channel, uint id)
325{ 283{
@@ -385,14 +343,15 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
385 return NULL; 343 return NULL;
386 } 344 }
387 } 345 }
346 get_device(dev);
388 347
389 return starget; 348 return starget;
390 349
391 found: 350 found:
392 found_target->reap_ref++; 351 found_target->reap_ref++;
393 spin_unlock_irqrestore(shost->host_lock, flags); 352 spin_unlock_irqrestore(shost->host_lock, flags);
394 put_device(parent);
395 if (found_target->state != STARGET_DEL) { 353 if (found_target->state != STARGET_DEL) {
354 put_device(parent);
396 kfree(starget); 355 kfree(starget);
397 return found_target; 356 return found_target;
398 } 357 }
@@ -451,6 +410,32 @@ void scsi_target_reap(struct scsi_target *starget)
451} 410}
452 411
453/** 412/**
413 * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
414 * @s: INQUIRY result string to sanitize
415 * @len: length of the string
416 *
417 * Description:
418 * The SCSI spec says that INQUIRY vendor, product, and revision
419 * strings must consist entirely of graphic ASCII characters,
420 * padded on the right with spaces. Since not all devices obey
421 * this rule, we will replace non-graphic or non-ASCII characters
422 * with spaces. Exception: a NUL character is interpreted as a
423 * string terminator, so all the following characters are set to
424 * spaces.
425 **/
426static void sanitize_inquiry_string(unsigned char *s, int len)
427{
428 int terminated = 0;
429
430 for (; len > 0; (--len, ++s)) {
431 if (*s == 0)
432 terminated = 1;
433 if (terminated || *s < 0x20 || *s > 0x7e)
434 *s = ' ';
435 }
436}
437
438/**
454 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 439 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
455 * @sdev: scsi_device to probe 440 * @sdev: scsi_device to probe
456 * @inq_result: area to store the INQUIRY result 441 * @inq_result: area to store the INQUIRY result
@@ -464,7 +449,7 @@ void scsi_target_reap(struct scsi_target *starget)
464 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 449 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
465 * are copied to the scsi_device any flags value is stored in *@bflags. 450 * are copied to the scsi_device any flags value is stored in *@bflags.
466 **/ 451 **/
467static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, 452static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
468 int result_len, int *bflags) 453 int result_len, int *bflags)
469{ 454{
470 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 455 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -523,7 +508,11 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
523 } 508 }
524 509
525 if (result == 0) { 510 if (result == 0) {
526 response_len = (unsigned char) inq_result[4] + 5; 511 sanitize_inquiry_string(&inq_result[8], 8);
512 sanitize_inquiry_string(&inq_result[16], 16);
513 sanitize_inquiry_string(&inq_result[32], 4);
514
515 response_len = inq_result[4] + 5;
527 if (response_len > 255) 516 if (response_len > 255)
528 response_len = first_inquiry_len; /* sanity */ 517 response_len = first_inquiry_len; /* sanity */
529 518
@@ -629,7 +618,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
629 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 618 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
630 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 619 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
631 **/ 620 **/
632static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) 621static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
622 int *bflags)
633{ 623{
634 /* 624 /*
635 * XXX do not save the inquiry, since it can change underneath us, 625 * XXX do not save the inquiry, since it can change underneath us,
@@ -654,9 +644,8 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
654 if (*bflags & BLIST_ISROM) { 644 if (*bflags & BLIST_ISROM) {
655 /* 645 /*
656 * It would be better to modify sdev->type, and set 646 * It would be better to modify sdev->type, and set
657 * sdev->removable, but then the print_inquiry() output 647 * sdev->removable; this can now be done since
658 * would not show TYPE_ROM; if print_inquiry() is removed 648 * print_inquiry has gone away.
659 * the issue goes away.
660 */ 649 */
661 inq_result[0] = TYPE_ROM; 650 inq_result[0] = TYPE_ROM;
662 inq_result[1] |= 0x80; /* removable */ 651 inq_result[1] |= 0x80; /* removable */
@@ -685,8 +674,6 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
685 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); 674 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
686 } 675 }
687 676
688 print_inquiry(inq_result);
689
690 /* 677 /*
691 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 678 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
692 * spec says: The device server is capable of supporting the 679 * spec says: The device server is capable of supporting the
@@ -716,6 +703,12 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
716 if (inq_result[7] & 0x10) 703 if (inq_result[7] & 0x10)
717 sdev->sdtr = 1; 704 sdev->sdtr = 1;
718 705
706 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
707 "ANSI: %d%s\n", scsi_device_type(sdev->type),
708 sdev->vendor, sdev->model, sdev->rev,
709 sdev->inq_periph_qual, inq_result[2] & 0x07,
710 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
711
719 /* 712 /*
720 * End sysfs code. 713 * End sysfs code.
721 */ 714 */
@@ -810,6 +803,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
810 803
811static inline void scsi_destroy_sdev(struct scsi_device *sdev) 804static inline void scsi_destroy_sdev(struct scsi_device *sdev)
812{ 805{
806 scsi_device_set_state(sdev, SDEV_DEL);
813 if (sdev->host->hostt->slave_destroy) 807 if (sdev->host->hostt->slave_destroy)
814 sdev->host->hostt->slave_destroy(sdev); 808 sdev->host->hostt->slave_destroy(sdev);
815 transport_destroy_device(&sdev->sdev_gendev); 809 transport_destroy_device(&sdev->sdev_gendev);
@@ -943,11 +937,26 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
943 } 937 }
944 938
945 /* 939 /*
946 * Non-standard SCSI targets may set the PDT to 0x1f (unknown or 940 * Some targets may set slight variations of PQ and PDT to signal
947 * no device type) instead of using the Peripheral Qualifier to 941 * that no LUN is present, so don't add sdev in these cases.
948 * indicate that no LUN is present. For example, USB UFI does this. 942 * Two specific examples are:
943 * 1) NetApp targets: return PQ=1, PDT=0x1f
944 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
945 * in the UFI 1.0 spec (we cannot rely on reserved bits).
946 *
947 * References:
948 * 1) SCSI SPC-3, pp. 145-146
949 * PQ=1: "A peripheral device having the specified peripheral
950 * device type is not connected to this logical unit. However, the
951 * device server is capable of supporting the specified peripheral
952 * device type on this logical unit."
953 * PDT=0x1f: "Unknown or no device type"
954 * 2) USB UFI 1.0, p. 20
955 * PDT=00h Direct-access device (floppy)
956 * PDT=1Fh none (no FDD connected to the requested logical unit)
949 */ 957 */
950 if (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f) { 958 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
959 (result[0] & 0x1f) == 0x1f) {
951 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 960 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
952 "scsi scan: peripheral device type" 961 "scsi scan: peripheral device type"
953 " of 31, no device added\n")); 962 " of 31, no device added\n"));
@@ -1345,7 +1354,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1345 if (!starget) 1354 if (!starget)
1346 return ERR_PTR(-ENOMEM); 1355 return ERR_PTR(-ENOMEM);
1347 1356
1348 get_device(&starget->dev);
1349 mutex_lock(&shost->scan_mutex); 1357 mutex_lock(&shost->scan_mutex);
1350 if (scsi_host_scan_allowed(shost)) 1358 if (scsi_host_scan_allowed(shost))
1351 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1359 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
@@ -1404,7 +1412,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1404 if (!starget) 1412 if (!starget)
1405 return; 1413 return;
1406 1414
1407 get_device(&starget->dev);
1408 if (lun != SCAN_WILD_CARD) { 1415 if (lun != SCAN_WILD_CARD) {
1409 /* 1416 /*
1410 * Scan for a specific host/chan/id/lun. 1417 * Scan for a specific host/chan/id/lun.
@@ -1586,7 +1593,8 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1586 if (sdev) { 1593 if (sdev) {
1587 sdev->sdev_gendev.parent = get_device(&starget->dev); 1594 sdev->sdev_gendev.parent = get_device(&starget->dev);
1588 sdev->borken = 0; 1595 sdev->borken = 0;
1589 } 1596 } else
1597 scsi_target_reap(starget);
1590 put_device(&starget->dev); 1598 put_device(&starget->dev);
1591 out: 1599 out:
1592 mutex_unlock(&shost->scan_mutex); 1600 mutex_unlock(&shost->scan_mutex);
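
sanitize_inquiry_string() above is self-contained, so it is easy to exercise outside the kernel. A quick userspace harness follows; the function body is copied from the hunk, while the sample INQUIRY vendor bytes are made up:

#include <stdio.h>

static void sanitize_inquiry_string(unsigned char *s, int len)
{
	int terminated = 0;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = 1;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';	/* replace non-graphic chars */
	}
}

int main(void)
{
	/* Vendor field with an embedded NUL and a control character. */
	unsigned char vendor[8] = { 'A', 'C', 'M', 'E', 0, 'X', 0x07, 'Y' };

	sanitize_inquiry_string(vendor, 8);
	printf("sanitized: \"%.8s\"\n", (const char *)vendor); /* "ACME    " */
	return 0;
}
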
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5ec7a4fb0145..e7fe565b96de 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -6,7 +6,6 @@
6 * Created to pull SCSI mid layer sysfs routines into one file. 6 * Created to pull SCSI mid layer sysfs routines into one file.
7 */ 7 */
8 8
9#include <linux/config.h>
10#include <linux/module.h> 9#include <linux/module.h>
11#include <linux/init.h> 10#include <linux/init.h>
12#include <linux/blkdev.h> 11#include <linux/blkdev.h>
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f2db7a41cf1d..38c215a78f69 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -32,6 +32,9 @@
32#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
35#include <linux/netlink.h>
36#include <net/netlink.h>
37#include <scsi/scsi_netlink_fc.h>
35#include "scsi_priv.h" 38#include "scsi_priv.h"
36 39
37static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 40static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
@@ -93,6 +96,29 @@ fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
93#define FC_PORTTYPE_MAX_NAMELEN 50 96#define FC_PORTTYPE_MAX_NAMELEN 50
94 97
95 98
99/* Convert fc_host_event_code values to ascii string name */
100static const struct {
101 enum fc_host_event_code value;
102 char *name;
103} fc_host_event_code_names[] = {
104 { FCH_EVT_LIP, "lip" },
105 { FCH_EVT_LINKUP, "link_up" },
106 { FCH_EVT_LINKDOWN, "link_down" },
107 { FCH_EVT_LIPRESET, "lip_reset" },
108 { FCH_EVT_RSCN, "rscn" },
109 { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
110 { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
111 { FCH_EVT_PORT_ONLINE, "port_online" },
112 { FCH_EVT_PORT_OFFLINE, "port_offline" },
113 { FCH_EVT_PORT_FABRIC, "port_fabric" },
114 { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
115 { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
116};
117fc_enum_name_search(host_event_code, fc_host_event_code,
118 fc_host_event_code_names)
119#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
120
121
96/* Convert fc_port_state values to ascii string name */ 122/* Convert fc_port_state values to ascii string name */
97static struct { 123static struct {
98 enum fc_port_state value; 124 enum fc_port_state value;
@@ -216,6 +242,7 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
216 242
217 243
218static void fc_timeout_deleted_rport(void *data); 244static void fc_timeout_deleted_rport(void *data);
245static void fc_timeout_fail_rport_io(void *data);
219static void fc_scsi_scan_rport(void *data); 246static void fc_scsi_scan_rport(void *data);
220 247
221/* 248/*
@@ -223,7 +250,7 @@ static void fc_scsi_scan_rport(void *data);
223 * Increase these values if you add attributes 250 * Increase these values if you add attributes
224 */ 251 */
225#define FC_STARGET_NUM_ATTRS 3 252#define FC_STARGET_NUM_ATTRS 3
226#define FC_RPORT_NUM_ATTRS 9 253#define FC_RPORT_NUM_ATTRS 10
227#define FC_HOST_NUM_ATTRS 17 254#define FC_HOST_NUM_ATTRS 17
228 255
229struct fc_internal { 256struct fc_internal {
@@ -301,8 +328,6 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
301 fc_host->supported_classes = FC_COS_UNSPECIFIED; 328 fc_host->supported_classes = FC_COS_UNSPECIFIED;
302 memset(fc_host->supported_fc4s, 0, 329 memset(fc_host->supported_fc4s, 0,
303 sizeof(fc_host->supported_fc4s)); 330 sizeof(fc_host->supported_fc4s));
304 memset(fc_host->symbolic_name, 0,
305 sizeof(fc_host->symbolic_name));
306 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; 331 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
307 fc_host->maxframe_size = -1; 332 fc_host->maxframe_size = -1;
308 memset(fc_host->serial_number, 0, 333 memset(fc_host->serial_number, 0,
@@ -315,6 +340,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
315 sizeof(fc_host->active_fc4s)); 340 sizeof(fc_host->active_fc4s));
316 fc_host->speed = FC_PORTSPEED_UNKNOWN; 341 fc_host->speed = FC_PORTSPEED_UNKNOWN;
317 fc_host->fabric_name = -1; 342 fc_host->fabric_name = -1;
343 memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
344 memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
318 345
319 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN; 346 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
320 347
@@ -368,7 +395,7 @@ static DECLARE_TRANSPORT_CLASS(fc_rport_class,
368 * should insulate the loss of a remote port. 395 * should insulate the loss of a remote port.
369 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. 396 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
370 */ 397 */
371static unsigned int fc_dev_loss_tmo = SCSI_DEVICE_BLOCK_MAX_TIMEOUT; 398static unsigned int fc_dev_loss_tmo = 60; /* seconds */
372 399
373module_param_named(dev_loss_tmo, fc_dev_loss_tmo, int, S_IRUGO|S_IWUSR); 400module_param_named(dev_loss_tmo, fc_dev_loss_tmo, int, S_IRUGO|S_IWUSR);
374MODULE_PARM_DESC(dev_loss_tmo, 401MODULE_PARM_DESC(dev_loss_tmo,
@@ -377,10 +404,184 @@ MODULE_PARM_DESC(dev_loss_tmo,
377 " exceeded, the scsi target is removed. Value should be" 404 " exceeded, the scsi target is removed. Value should be"
378 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); 405 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
379 406
407/**
408 * Netlink Infrastructure
409 **/
410
411static atomic_t fc_event_seq;
412
413/**
414 * fc_get_event_number - Obtain the next sequential FC event number
415 *
416 * Notes:
417 * We could have inline'd this, but it would have required fc_event_seq to
418 * be exposed. For now, live with the subroutine call.
419 * Atomic used to avoid lock/unlock...
420 **/
421u32
422fc_get_event_number(void)
423{
424 return atomic_add_return(1, &fc_event_seq);
425}
426EXPORT_SYMBOL(fc_get_event_number);
427
428
429/**
430 * fc_host_post_event - called to post an event on an fc_host.
431 *
432 * @shost: host the event occurred on
433 * @event_number: fc event number obtained from get_fc_event_number()
434 * @event_code: fc_host event being posted
435 * @event_data: 32bits of data for the event being posted
436 *
437 * Notes:
438 * This routine assumes no locks are held on entry.
439 **/
440void
441fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
442 enum fc_host_event_code event_code, u32 event_data)
443{
444 struct sk_buff *skb;
445 struct nlmsghdr *nlh;
446 struct fc_nl_event *event;
447 const char *name;
448 u32 len, skblen;
449 int err;
450
451 if (!scsi_nl_sock) {
452 err = -ENOENT;
453 goto send_fail;
454 }
455
456 len = FC_NL_MSGALIGN(sizeof(*event));
457 skblen = NLMSG_SPACE(len);
458
459 skb = alloc_skb(skblen, GFP_KERNEL);
460 if (!skb) {
461 err = -ENOBUFS;
462 goto send_fail;
463 }
464
465 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
466 skblen - sizeof(*nlh), 0);
467 if (!nlh) {
468 err = -ENOBUFS;
469 goto send_fail_skb;
470 }
471 event = NLMSG_DATA(nlh);
472
473 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
474 FC_NL_ASYNC_EVENT, len);
475 event->seconds = get_seconds();
476 event->vendor_id = 0;
477 event->host_no = shost->host_no;
478 event->event_datalen = sizeof(u32); /* bytes */
479 event->event_num = event_number;
480 event->event_code = event_code;
481 event->event_data = event_data;
482
483 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
484 GFP_KERNEL);
485 if (err && (err != -ESRCH)) /* filter no recipient errors */
486 /* nlmsg_multicast already kfree_skb'd */
487 goto send_fail;
488
489 return;
490
491send_fail_skb:
492 kfree_skb(skb);
493send_fail:
494 name = get_fc_host_event_code_name(event_code);
495 printk(KERN_WARNING
496 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
497 __FUNCTION__, shost->host_no,
498 (name) ? name : "<unknown>", event_data, err);
499 return;
500}
501EXPORT_SYMBOL(fc_host_post_event);
502
503
504/**
505 * fc_host_post_vendor_event - called to post a vendor unique event on
506 * an fc_host
507 *
508 * @shost: host the event occurred on
509 * @event_number: fc event number obtained from fc_get_event_number()
510 * @data_len: amount, in bytes, of vendor unique data
511 * @data_buf: pointer to vendor unique data
512 *
513 * Notes:
514 * This routine assumes no locks are held on entry.
515 **/
516void
517fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
518 u32 data_len, char * data_buf, u64 vendor_id)
519{
520 struct sk_buff *skb;
521 struct nlmsghdr *nlh;
522 struct fc_nl_event *event;
523 u32 len, skblen;
524 int err;
525
526 if (!scsi_nl_sock) {
527 err = -ENOENT;
528 goto send_vendor_fail;
529 }
530
531 len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
532 skblen = NLMSG_SPACE(len);
533
534 skb = alloc_skb(skblen, GFP_KERNEL);
535 if (!skb) {
536 err = -ENOBUFS;
537 goto send_vendor_fail;
538 }
539
540 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
541 skblen - sizeof(*nlh), 0);
542 if (!nlh) {
543 err = -ENOBUFS;
544 goto send_vendor_fail_skb;
545 }
546 event = NLMSG_DATA(nlh);
547
548 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
549 FC_NL_ASYNC_EVENT, len);
550 event->seconds = get_seconds();
551 event->vendor_id = vendor_id;
552 event->host_no = shost->host_no;
553 event->event_datalen = data_len; /* bytes */
554 event->event_num = event_number;
555 event->event_code = FCH_EVT_VENDOR_UNIQUE;
556 memcpy(&event->event_data, data_buf, data_len);
557
558 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
559 GFP_KERNEL);
560 if (err && (err != -ESRCH)) /* filter no recipient errors */
561 /* nlmsg_multicast already kfree_skb'd */
562 goto send_vendor_fail;
563
564 return;
565
566send_vendor_fail_skb:
567 kfree_skb(skb);
568send_vendor_fail:
569 printk(KERN_WARNING
570 "%s: Dropped Event : host %d vendor_unique - err %d\n",
571 __FUNCTION__, shost->host_no, err);
572 return;
573}
574EXPORT_SYMBOL(fc_host_post_vendor_event);
575
576
380 577
381static __init int fc_transport_init(void) 578static __init int fc_transport_init(void)
382{ 579{
383 int error = transport_class_register(&fc_host_class); 580 int error;
581
582 atomic_set(&fc_event_seq, 0);
583
584 error = transport_class_register(&fc_host_class);
384 if (error) 585 if (error)
385 return error; 586 return error;
386 error = transport_class_register(&fc_rport_class); 587 error = transport_class_register(&fc_rport_class);
@@ -424,11 +625,14 @@ store_fc_rport_##field(struct class_device *cdev, const char *buf, \
424 struct fc_rport *rport = transport_class_to_rport(cdev); \ 625 struct fc_rport *rport = transport_class_to_rport(cdev); \
425 struct Scsi_Host *shost = rport_to_shost(rport); \ 626 struct Scsi_Host *shost = rport_to_shost(rport); \
426 struct fc_internal *i = to_fc_internal(shost->transportt); \ 627 struct fc_internal *i = to_fc_internal(shost->transportt); \
628 char *cp; \
427 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \ 629 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
428 (rport->port_state == FC_PORTSTATE_DELETED) || \ 630 (rport->port_state == FC_PORTSTATE_DELETED) || \
429 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \ 631 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
430 return -EBUSY; \ 632 return -EBUSY; \
431 val = simple_strtoul(buf, NULL, 0); \ 633 val = simple_strtoul(buf, &cp, 0); \
634 if (*cp && (*cp != '\n')) \
635 return -EINVAL; \
432 i->f->set_rport_##field(rport, val); \ 636 i->f->set_rport_##field(rport, val); \
433 return count; \ 637 return count; \
434} 638}
@@ -510,6 +714,13 @@ static FC_CLASS_DEVICE_ATTR(rport, title, S_IRUGO, \
510 if (i->f->show_rport_##field) \ 714 if (i->f->show_rport_##field) \
511 count++ 715 count++
512 716
717#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
718{ \
719 i->private_rport_attrs[count] = class_device_attr_rport_##field; \
720 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
721 count++; \
722}
723
513 724
514/* The FC Transport Remote Port Attributes: */ 725/* The FC Transport Remote Port Attributes: */
515 726
@@ -542,12 +753,14 @@ store_fc_rport_dev_loss_tmo(struct class_device *cdev, const char *buf,
542 struct fc_rport *rport = transport_class_to_rport(cdev); 753 struct fc_rport *rport = transport_class_to_rport(cdev);
543 struct Scsi_Host *shost = rport_to_shost(rport); 754 struct Scsi_Host *shost = rport_to_shost(rport);
544 struct fc_internal *i = to_fc_internal(shost->transportt); 755 struct fc_internal *i = to_fc_internal(shost->transportt);
756 char *cp;
545 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || 757 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
546 (rport->port_state == FC_PORTSTATE_DELETED) || 758 (rport->port_state == FC_PORTSTATE_DELETED) ||
547 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 759 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
548 return -EBUSY; 760 return -EBUSY;
549 val = simple_strtoul(buf, NULL, 0); 761 val = simple_strtoul(buf, &cp, 0);
550 if ((val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) 762 if ((*cp && (*cp != '\n')) ||
763 (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
551 return -EINVAL; 764 return -EINVAL;
552 i->f->set_rport_dev_loss_tmo(rport, val); 765 i->f->set_rport_dev_loss_tmo(rport, val);
553 return count; 766 return count;
@@ -597,6 +810,44 @@ static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
597fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 810fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
598fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); 811fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
599 812
813/*
814 * fast_io_fail_tmo attribute
815 */
816static ssize_t
817show_fc_rport_fast_io_fail_tmo (struct class_device *cdev, char *buf)
818{
819 struct fc_rport *rport = transport_class_to_rport(cdev);
820
821 if (rport->fast_io_fail_tmo == -1)
822 return snprintf(buf, 5, "off\n");
823 return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
824}
825
826static ssize_t
827store_fc_rport_fast_io_fail_tmo(struct class_device *cdev, const char *buf,
828 size_t count)
829{
830 int val;
831 char *cp;
832 struct fc_rport *rport = transport_class_to_rport(cdev);
833
834 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
835 (rport->port_state == FC_PORTSTATE_DELETED) ||
836 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
837 return -EBUSY;
838 if (strncmp(buf, "off", 3) == 0)
839 rport->fast_io_fail_tmo = -1;
840 else {
841 val = simple_strtoul(buf, &cp, 0);
842 if ((*cp && (*cp != '\n')) ||
843 (val < 0) || (val >= rport->dev_loss_tmo))
844 return -EINVAL;
845 rport->fast_io_fail_tmo = val;
846 }
847 return count;
848}
849static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
850 show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
600 851
601 852
602/* 853/*
@@ -682,12 +933,34 @@ store_fc_host_##field(struct class_device *cdev, const char *buf, \
682 int val; \ 933 int val; \
683 struct Scsi_Host *shost = transport_class_to_shost(cdev); \ 934 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
684 struct fc_internal *i = to_fc_internal(shost->transportt); \ 935 struct fc_internal *i = to_fc_internal(shost->transportt); \
936 char *cp; \
685 \ 937 \
686 val = simple_strtoul(buf, NULL, 0); \ 938 val = simple_strtoul(buf, &cp, 0); \
939 if (*cp && (*cp != '\n')) \
940 return -EINVAL; \
687 i->f->set_host_##field(shost, val); \ 941 i->f->set_host_##field(shost, val); \
688 return count; \ 942 return count; \
689} 943}
690 944
945#define fc_host_store_str_function(field, slen) \
946static ssize_t \
947store_fc_host_##field(struct class_device *cdev, const char *buf, \
948 size_t count) \
949{ \
950 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
951 struct fc_internal *i = to_fc_internal(shost->transportt); \
952 unsigned int cnt = count; \
953 \
954 /* count may include a LF at end of string */ \
955 if (buf[cnt-1] == '\n') \
956 cnt--; \
957 if (cnt > ((slen) - 1)) \
958 return -EINVAL; \
959 memcpy(fc_host_##field(shost), buf, cnt); \
960 i->f->set_host_##field(shost); \
961 return count; \
962}
963
691#define fc_host_rd_attr(field, format_string, sz) \ 964#define fc_host_rd_attr(field, format_string, sz) \
692 fc_host_show_function(field, format_string, sz, ) \ 965 fc_host_show_function(field, format_string, sz, ) \
693static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ 966static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \
@@ -815,7 +1088,6 @@ fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
815fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); 1088fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
816fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, 1089fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
817 unsigned long long); 1090 unsigned long long);
818fc_private_host_rd_attr(symbolic_name, "%s\n", (FC_SYMBOLIC_NAME_SIZE +1));
819fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); 1091fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
820fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); 1092fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
821 1093
@@ -858,6 +1130,13 @@ fc_host_rd_attr(port_id, "0x%06x\n", 20);
858fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); 1130fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
859fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 1131fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
860fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); 1132fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1133fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1134
1135fc_private_host_show_function(system_hostname, "%s\n",
1136 FC_SYMBOLIC_NAME_SIZE + 1, )
1137fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1138static FC_CLASS_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1139 show_fc_host_system_hostname, store_fc_host_system_hostname);
861 1140
862 1141
863/* Private Host Attributes */ 1142/* Private Host Attributes */
@@ -1223,7 +1502,6 @@ fc_attach_transport(struct fc_function_template *ft)
1223 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); 1502 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
1224 SETUP_HOST_ATTRIBUTE_RD(supported_classes); 1503 SETUP_HOST_ATTRIBUTE_RD(supported_classes);
1225 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); 1504 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
1226 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
1227 SETUP_HOST_ATTRIBUTE_RD(supported_speeds); 1505 SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
1228 SETUP_HOST_ATTRIBUTE_RD(maxframe_size); 1506 SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
1229 SETUP_HOST_ATTRIBUTE_RD(serial_number); 1507 SETUP_HOST_ATTRIBUTE_RD(serial_number);
@@ -1234,6 +1512,8 @@ fc_attach_transport(struct fc_function_template *ft)
1234 SETUP_HOST_ATTRIBUTE_RD(active_fc4s); 1512 SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
1235 SETUP_HOST_ATTRIBUTE_RD(speed); 1513 SETUP_HOST_ATTRIBUTE_RD(speed);
1236 SETUP_HOST_ATTRIBUTE_RD(fabric_name); 1514 SETUP_HOST_ATTRIBUTE_RD(fabric_name);
1515 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
1516 SETUP_HOST_ATTRIBUTE_RW(system_hostname);
1237 1517
1238 /* Transport-managed attributes */ 1518 /* Transport-managed attributes */
1239 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 1519 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
@@ -1257,6 +1537,8 @@ fc_attach_transport(struct fc_function_template *ft)
1257 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); 1537 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
1258 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); 1538 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
1259 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); 1539 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
1540 if (ft->terminate_rport_io)
1541 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
1260 1542
1261 BUG_ON(count > FC_RPORT_NUM_ATTRS); 1543 BUG_ON(count > FC_RPORT_NUM_ATTRS);
1262 1544
@@ -1284,7 +1566,9 @@ EXPORT_SYMBOL(fc_release_transport);
1284 * @work: Work to queue for execution. 1566 * @work: Work to queue for execution.
1285 * 1567 *
1286 * Return value: 1568 * Return value:
1287 * 0 on success / != 0 for error 1569 * 1 - work queued for execution
1570 * 0 - work is already queued
1571 * -EINVAL - work queue doesn't exist
1288 **/ 1572 **/
1289static int 1573static int
1290fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) 1574fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
@@ -1326,7 +1610,7 @@ fc_flush_work(struct Scsi_Host *shost)
1326 * @delay: jiffies to delay the work queuing 1610 * @delay: jiffies to delay the work queuing
1327 * 1611 *
1328 * Return value: 1612 * Return value:
1329 * 0 on success / != 0 for error 1613 * 1 on success / 0 already queued / < 0 for error
1330 **/ 1614 **/
1331static int 1615static int
1332fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
@@ -1341,6 +1625,9 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
1341 return -EINVAL; 1625 return -EINVAL;
1342 } 1626 }
1343 1627
1628 if (delay == 0)
1629 return queue_work(fc_host_devloss_work_q(shost), work);
1630
1344 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1631 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
1345} 1632}
1346 1633
@@ -1433,12 +1720,23 @@ fc_starget_delete(void *data)
1433 struct fc_rport *rport = (struct fc_rport *)data; 1720 struct fc_rport *rport = (struct fc_rport *)data;
1434 struct Scsi_Host *shost = rport_to_shost(rport); 1721 struct Scsi_Host *shost = rport_to_shost(rport);
1435 unsigned long flags; 1722 unsigned long flags;
1723 struct fc_internal *i = to_fc_internal(shost->transportt);
1436 1724
1437 scsi_target_unblock(&rport->dev); 1725 /*
1726 * Involve the LLDD if possible. All io on the rport is to
1727 * be terminated, either as part of the dev_loss_tmo callback
1728 * processing, or via the terminate_rport_io function.
1729 */
1730 if (i->f->dev_loss_tmo_callbk)
1731 i->f->dev_loss_tmo_callbk(rport);
1732 else if (i->f->terminate_rport_io)
1733 i->f->terminate_rport_io(rport);
1438 1734
1439 spin_lock_irqsave(shost->host_lock, flags); 1735 spin_lock_irqsave(shost->host_lock, flags);
1440 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { 1736 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
1441 spin_unlock_irqrestore(shost->host_lock, flags); 1737 spin_unlock_irqrestore(shost->host_lock, flags);
1738 if (!cancel_delayed_work(&rport->fail_io_work))
1739 fc_flush_devloss(shost);
1442 if (!cancel_delayed_work(&rport->dev_loss_work)) 1740 if (!cancel_delayed_work(&rport->dev_loss_work))
1443 fc_flush_devloss(shost); 1741 fc_flush_devloss(shost);
1444 spin_lock_irqsave(shost->host_lock, flags); 1742 spin_lock_irqsave(shost->host_lock, flags);
@@ -1461,10 +1759,7 @@ fc_rport_final_delete(void *data)
1461 struct fc_rport *rport = (struct fc_rport *)data; 1759 struct fc_rport *rport = (struct fc_rport *)data;
1462 struct device *dev = &rport->dev; 1760 struct device *dev = &rport->dev;
1463 struct Scsi_Host *shost = rport_to_shost(rport); 1761 struct Scsi_Host *shost = rport_to_shost(rport);
1464 1762 struct fc_internal *i = to_fc_internal(shost->transportt);
1465 /* Delete SCSI target and sdevs */
1466 if (rport->scsi_target_id != -1)
1467 fc_starget_delete(data);
1468 1763
1469 /* 1764 /*
1470 * if a scan is pending, flush the SCSI Host work_q so that 1765 * if a scan is pending, flush the SCSI Host work_q so that
@@ -1473,10 +1768,19 @@ fc_rport_final_delete(void *data)
1473 if (rport->flags & FC_RPORT_SCAN_PENDING) 1768 if (rport->flags & FC_RPORT_SCAN_PENDING)
1474 scsi_flush_work(shost); 1769 scsi_flush_work(shost);
1475 1770
1771 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(data);
1774 else if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io)
1777 i->f->terminate_rport_io(rport);
1778
1476 transport_remove_device(dev); 1779 transport_remove_device(dev);
1477 device_del(dev); 1780 device_del(dev);
1478 transport_destroy_device(dev); 1781 transport_destroy_device(dev);
1479 put_device(&shost->shost_gendev); 1782 put_device(&shost->shost_gendev); /* for fc_host->rport list */
1783 put_device(dev); /* for self-reference */
1480} 1784}
1481 1785
1482 1786
@@ -1523,8 +1827,10 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1523 if (fci->f->dd_fcrport_size) 1827 if (fci->f->dd_fcrport_size)
1524 rport->dd_data = &rport[1]; 1828 rport->dd_data = &rport[1];
1525 rport->channel = channel; 1829 rport->channel = channel;
1830 rport->fast_io_fail_tmo = -1;
1526 1831
1527 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1832 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
1833 INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
1528 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1834 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
1529 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1835 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
1530 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1836 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
@@ -1537,13 +1843,13 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1537 else 1843 else
1538 rport->scsi_target_id = -1; 1844 rport->scsi_target_id = -1;
1539 list_add_tail(&rport->peers, &fc_host->rports); 1845 list_add_tail(&rport->peers, &fc_host->rports);
1540 get_device(&shost->shost_gendev); 1846 get_device(&shost->shost_gendev); /* for fc_host->rport list */
1541 1847
1542 spin_unlock_irqrestore(shost->host_lock, flags); 1848 spin_unlock_irqrestore(shost->host_lock, flags);
1543 1849
1544 dev = &rport->dev; 1850 dev = &rport->dev;
1545 device_initialize(dev); 1851 device_initialize(dev); /* takes self reference */
1546 dev->parent = get_device(&shost->shost_gendev); 1852 dev->parent = get_device(&shost->shost_gendev); /* parent reference */
1547 dev->release = fc_rport_dev_release; 1853 dev->release = fc_rport_dev_release;
1548 sprintf(dev->bus_id, "rport-%d:%d-%d", 1854 sprintf(dev->bus_id, "rport-%d:%d-%d",
1549 shost->host_no, channel, rport->number); 1855 shost->host_no, channel, rport->number);
@@ -1567,10 +1873,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1567 1873
1568delete_rport: 1874delete_rport:
1569 transport_destroy_device(dev); 1875 transport_destroy_device(dev);
1570 put_device(dev->parent);
1571 spin_lock_irqsave(shost->host_lock, flags); 1876 spin_lock_irqsave(shost->host_lock, flags);
1572 list_del(&rport->peers); 1877 list_del(&rport->peers);
1573 put_device(&shost->shost_gendev); 1878 put_device(&shost->shost_gendev); /* for fc_host->rport list */
1574 spin_unlock_irqrestore(shost->host_lock, flags); 1879 spin_unlock_irqrestore(shost->host_lock, flags);
1575 put_device(dev->parent); 1880 put_device(dev->parent);
1576 kfree(rport); 1881 kfree(rport);
@@ -1689,11 +1994,13 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1689 /* restart the target */ 1994 /* restart the target */
1690 1995
1691 /* 1996 /*
1692 * Stop the target timer first. Take no action 1997 * Stop the target timers first. Take no action
1693 * on the del_timer failure as the state 1998 * on the del_timer failure as the state
1694 * machine state change will validate the 1999 * machine state change will validate the
1695 * transaction. 2000 * transaction.
1696 */ 2001 */
2002 if (!cancel_delayed_work(&rport->fail_io_work))
2003 fc_flush_devloss(shost);
1697 if (!cancel_delayed_work(work)) 2004 if (!cancel_delayed_work(work))
1698 fc_flush_devloss(shost); 2005 fc_flush_devloss(shost);
1699 2006
@@ -1707,6 +2014,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1707 2014
1708 spin_unlock_irqrestore(shost->host_lock, flags); 2015 spin_unlock_irqrestore(shost->host_lock, flags);
1709 2016
2017 scsi_target_unblock(&rport->dev);
2018
1710 return rport; 2019 return rport;
1711 } 2020 }
1712 } 2021 }
@@ -1762,9 +2071,10 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1762 /* initiate a scan of the target */ 2071 /* initiate a scan of the target */
1763 rport->flags |= FC_RPORT_SCAN_PENDING; 2072 rport->flags |= FC_RPORT_SCAN_PENDING;
1764 scsi_queue_work(shost, &rport->scan_work); 2073 scsi_queue_work(shost, &rport->scan_work);
1765 } 2074 spin_unlock_irqrestore(shost->host_lock, flags);
1766 2075 scsi_target_unblock(&rport->dev);
1767 spin_unlock_irqrestore(shost->host_lock, flags); 2076 } else
2077 spin_unlock_irqrestore(shost->host_lock, flags);
1768 2078
1769 return rport; 2079 return rport;
1770 } 2080 }
@@ -1834,6 +2144,7 @@ void
1834fc_remote_port_delete(struct fc_rport *rport) 2144fc_remote_port_delete(struct fc_rport *rport)
1835{ 2145{
1836 struct Scsi_Host *shost = rport_to_shost(rport); 2146 struct Scsi_Host *shost = rport_to_shost(rport);
2147 struct fc_internal *i = to_fc_internal(shost->transportt);
1837 int timeout = rport->dev_loss_tmo; 2148 int timeout = rport->dev_loss_tmo;
1838 unsigned long flags; 2149 unsigned long flags;
1839 2150
@@ -1864,6 +2175,12 @@ fc_remote_port_delete(struct fc_rport *rport)
1864 2175
1865 scsi_target_block(&rport->dev); 2176 scsi_target_block(&rport->dev);
1866 2177
2178 /* see if we need to kill io faster than waiting for device loss */
2179 if ((rport->fast_io_fail_tmo != -1) &&
2180 (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
2181 fc_queue_devloss_work(shost, &rport->fail_io_work,
2182 rport->fast_io_fail_tmo * HZ);
2183
1867 /* cap the length the devices can be blocked until they are deleted */ 2184 /* cap the length the devices can be blocked until they are deleted */
1868 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); 2185 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
1869} 2186}
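
Editor's note: the two queued works above give the rport a two-stage teardown — fail_io_work fires at fast_io_fail_tmo and only terminates outstanding io, while dev_loss_work fires later and removes the target. A minimal sketch of how an FC LLD might opt in; all example_* names are hypothetical, and only fast_io_fail_tmo, terminate_rport_io and fc_remote_port_delete() come from this patch:

	#include <scsi/scsi_transport_fc.h>

	/* called back by the transport class once fast_io_fail_tmo expires */
	static void example_terminate_rport_io(struct fc_rport *rport)
	{
		/* abort every command still outstanding on this rport */
	}

	static struct fc_function_template example_fc_template = {
		.terminate_rport_io	= example_terminate_rport_io,
		/* ... remaining transport callbacks ... */
	};

	static void example_handle_link_down(struct fc_rport *rport)
	{
		/* fail io after 5s even though dev_loss_tmo may be much longer */
		rport->fast_io_fail_tmo = 5;
		fc_remote_port_delete(rport);
	}
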
@@ -1923,6 +2240,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
1923 * machine state change will validate the 2240 * machine state change will validate the
1924 * transaction. 2241 * transaction.
1925 */ 2242 */
2243 if (!cancel_delayed_work(&rport->fail_io_work))
2244 fc_flush_devloss(shost);
1926 if (!cancel_delayed_work(&rport->dev_loss_work)) 2245 if (!cancel_delayed_work(&rport->dev_loss_work))
1927 fc_flush_devloss(shost); 2246 fc_flush_devloss(shost);
1928 2247
@@ -1938,6 +2257,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
1938 rport->flags |= FC_RPORT_SCAN_PENDING; 2257 rport->flags |= FC_RPORT_SCAN_PENDING;
1939 scsi_queue_work(shost, &rport->scan_work); 2258 scsi_queue_work(shost, &rport->scan_work);
1940 spin_unlock_irqrestore(shost->host_lock, flags); 2259 spin_unlock_irqrestore(shost->host_lock, flags);
2260 scsi_target_unblock(&rport->dev);
1941 } 2261 }
1942} 2262}
1943EXPORT_SYMBOL(fc_remote_port_rolechg); 2263EXPORT_SYMBOL(fc_remote_port_rolechg);
@@ -1970,8 +2290,9 @@ fc_timeout_deleted_rport(void *data)
1970 dev_printk(KERN_ERR, &rport->dev, 2290 dev_printk(KERN_ERR, &rport->dev,
1971 "blocked FC remote port time out: no longer" 2291 "blocked FC remote port time out: no longer"
1972 " a FCP target, removing starget\n"); 2292 " a FCP target, removing starget\n");
1973 fc_queue_work(shost, &rport->stgt_delete_work);
1974 spin_unlock_irqrestore(shost->host_lock, flags); 2293 spin_unlock_irqrestore(shost->host_lock, flags);
2294 scsi_target_unblock(&rport->dev);
2295 fc_queue_work(shost, &rport->stgt_delete_work);
1975 return; 2296 return;
1976 } 2297 }
1977 2298
@@ -2035,17 +2356,37 @@ fc_timeout_deleted_rport(void *data)
2035 * went away and didn't come back - we'll remove 2356 * went away and didn't come back - we'll remove
2036 * all attached scsi devices. 2357 * all attached scsi devices.
2037 */ 2358 */
2359 spin_unlock_irqrestore(shost->host_lock, flags);
2360
2361 scsi_target_unblock(&rport->dev);
2038 fc_queue_work(shost, &rport->stgt_delete_work); 2362 fc_queue_work(shost, &rport->stgt_delete_work);
2363}
2039 2364
2040 spin_unlock_irqrestore(shost->host_lock, flags); 2365/**
2366 * fc_timeout_fail_rport_io - Timeout handler for failing io fast on a
2367 * disconnected SCSI target.
2368 *
2369 * @data: rport to terminate io on.
2370 *
2371 * Notes: Only requests that outstanding io be failed; it does not
2372 * wait for that io to be flushed before returning.
2373 **/
2374static void
2375fc_timeout_fail_rport_io(void *data)
2376{
2377 struct fc_rport *rport = (struct fc_rport *)data;
2378 struct Scsi_Host *shost = rport_to_shost(rport);
2379 struct fc_internal *i = to_fc_internal(shost->transportt);
2380
2381 if (rport->port_state != FC_PORTSTATE_BLOCKED)
2382 return;
2383
2384 i->f->terminate_rport_io(rport);
2041} 2385}
2042 2386
2043/** 2387/**
2044 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2045 * 2389 *
2046 * Will unblock the target (in case it went away and has now come back),
2047 * then invoke a scan.
2048 *
2049 * @data: remote port to be scanned. 2390 * @data: remote port to be scanned.
2050 **/ 2391 **/
2051static void 2392static void
@@ -2057,7 +2398,6 @@ fc_scsi_scan_rport(void *data)
2057 2398
2058 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2399 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2059 (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2400 (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
2060 scsi_target_unblock(&rport->dev);
2061 scsi_scan_target(&rport->dev, rport->channel, 2401 scsi_scan_target(&rport->dev, rport->channel,
2062 rport->scsi_target_id, SCAN_WILD_CARD, 1); 2402 rport->scsi_target_id, SCAN_WILD_CARD, 1);
2063 } 2403 }
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5569fdcfd621..7b0019cccce3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,6 +34,7 @@
34#define ISCSI_SESSION_ATTRS 11 34#define ISCSI_SESSION_ATTRS 11
35#define ISCSI_CONN_ATTRS 11 35#define ISCSI_CONN_ATTRS 11
36#define ISCSI_HOST_ATTRS 0 36#define ISCSI_HOST_ATTRS 0
37#define ISCSI_TRANSPORT_VERSION "2.0-685"
37 38
38struct iscsi_internal { 39struct iscsi_internal {
39 int daemon_pid; 40 int daemon_pid;
@@ -228,14 +229,11 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
228static void iscsi_session_release(struct device *dev) 229static void iscsi_session_release(struct device *dev)
229{ 230{
230 struct iscsi_cls_session *session = iscsi_dev_to_session(dev); 231 struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
231 struct iscsi_transport *transport = session->transport;
232 struct Scsi_Host *shost; 232 struct Scsi_Host *shost;
233 233
234 shost = iscsi_session_to_shost(session); 234 shost = iscsi_session_to_shost(session);
235 scsi_host_put(shost); 235 scsi_host_put(shost);
236 kfree(session->targetname);
237 kfree(session); 236 kfree(session);
238 module_put(transport->owner);
239} 237}
240 238
241static int iscsi_is_session_dev(const struct device *dev) 239static int iscsi_is_session_dev(const struct device *dev)
@@ -251,10 +249,9 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
251 249
252 mutex_lock(&ihost->mutex); 250 mutex_lock(&ihost->mutex);
253 list_for_each_entry(session, &ihost->sessions, host_list) { 251 list_for_each_entry(session, &ihost->sessions, host_list) {
254 if ((channel == SCAN_WILD_CARD || 252 if ((channel == SCAN_WILD_CARD || channel == 0) &&
255 channel == session->channel) &&
256 (id == SCAN_WILD_CARD || id == session->target_id)) 253 (id == SCAN_WILD_CARD || id == session->target_id))
257 scsi_scan_target(&session->dev, session->channel, 254 scsi_scan_target(&session->dev, 0,
258 session->target_id, lun, 1); 255 session->target_id, lun, 1);
259 } 256 }
260 mutex_unlock(&ihost->mutex); 257 mutex_unlock(&ihost->mutex);
@@ -291,80 +288,92 @@ void iscsi_block_session(struct iscsi_cls_session *session)
291} 288}
292EXPORT_SYMBOL_GPL(iscsi_block_session); 289EXPORT_SYMBOL_GPL(iscsi_block_session);
293 290
294/**
295 * iscsi_create_session - create iscsi class session
296 * @shost: scsi host
297 * @transport: iscsi transport
298 *
299 * This can be called from a LLD or iscsi_transport.
300 **/
301struct iscsi_cls_session * 291struct iscsi_cls_session *
302iscsi_create_session(struct Scsi_Host *shost, 292iscsi_alloc_session(struct Scsi_Host *shost,
303 struct iscsi_transport *transport, int channel) 293 struct iscsi_transport *transport)
304{ 294{
305 struct iscsi_host *ihost;
306 struct iscsi_cls_session *session; 295 struct iscsi_cls_session *session;
307 int err;
308
309 if (!try_module_get(transport->owner))
310 return NULL;
311 296
312 session = kzalloc(sizeof(*session) + transport->sessiondata_size, 297 session = kzalloc(sizeof(*session) + transport->sessiondata_size,
313 GFP_KERNEL); 298 GFP_KERNEL);
314 if (!session) 299 if (!session)
315 goto module_put; 300 return NULL;
301
316 session->transport = transport; 302 session->transport = transport;
317 session->recovery_tmo = 120; 303 session->recovery_tmo = 120;
318 INIT_WORK(&session->recovery_work, session_recovery_timedout, session); 304 INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
319 INIT_LIST_HEAD(&session->host_list); 305 INIT_LIST_HEAD(&session->host_list);
320 INIT_LIST_HEAD(&session->sess_list); 306 INIT_LIST_HEAD(&session->sess_list);
321 307
308 /* this is released in the dev's release function */
309 scsi_host_get(shost);
310 session->dev.parent = &shost->shost_gendev;
311 session->dev.release = iscsi_session_release;
312 device_initialize(&session->dev);
322 if (transport->sessiondata_size) 313 if (transport->sessiondata_size)
323 session->dd_data = &session[1]; 314 session->dd_data = &session[1];
315 return session;
316}
317EXPORT_SYMBOL_GPL(iscsi_alloc_session);
324 318
325 /* this is released in the dev's release function */ 319int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
326 scsi_host_get(shost); 320{
327 ihost = shost->shost_data; 321 struct Scsi_Host *shost = iscsi_session_to_shost(session);
322 struct iscsi_host *ihost;
323 int err;
328 324
325 ihost = shost->shost_data;
329 session->sid = iscsi_session_nr++; 326 session->sid = iscsi_session_nr++;
330 session->channel = channel; 327 session->target_id = target_id;
331 session->target_id = ihost->next_target_id++;
332 328
333 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 329 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
334 session->sid); 330 session->sid);
335 session->dev.parent = &shost->shost_gendev; 331 err = device_add(&session->dev);
336 session->dev.release = iscsi_session_release;
337 err = device_register(&session->dev);
338 if (err) { 332 if (err) {
339 dev_printk(KERN_ERR, &session->dev, "iscsi: could not " 333 dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
340 "register session's dev\n"); 334 "register session's dev\n");
341 goto free_session; 335 goto release_host;
342 } 336 }
343 transport_register_device(&session->dev); 337 transport_register_device(&session->dev);
344 338
345 mutex_lock(&ihost->mutex); 339 mutex_lock(&ihost->mutex);
346 list_add(&session->host_list, &ihost->sessions); 340 list_add(&session->host_list, &ihost->sessions);
347 mutex_unlock(&ihost->mutex); 341 mutex_unlock(&ihost->mutex);
342 return 0;
348 343
349 return session; 344release_host:
350 345 scsi_host_put(shost);
351free_session: 346 return err;
352 kfree(session);
353module_put:
354 module_put(transport->owner);
355 return NULL;
356} 347}
357 348EXPORT_SYMBOL_GPL(iscsi_add_session);
358EXPORT_SYMBOL_GPL(iscsi_create_session);
359 349
360/** 350/**
361 * iscsi_destroy_session - destroy iscsi session 351 * iscsi_create_session - create iscsi class session
362 * @session: iscsi_session 352 * @shost: scsi host
353 * @transport: iscsi transport
363 * 354 *
364 * Can be called by a LLD or iscsi_transport. There must not be 355 * This can be called from a LLD or iscsi_transport.
365 * any running connections.
366 **/ 356 **/
367int iscsi_destroy_session(struct iscsi_cls_session *session) 357struct iscsi_cls_session *
358iscsi_create_session(struct Scsi_Host *shost,
359 struct iscsi_transport *transport,
360 unsigned int target_id)
361{
362 struct iscsi_cls_session *session;
363
364 session = iscsi_alloc_session(shost, transport);
365 if (!session)
366 return NULL;
367
368 if (iscsi_add_session(session, target_id)) {
369 iscsi_free_session(session);
370 return NULL;
371 }
372 return session;
373}
374EXPORT_SYMBOL_GPL(iscsi_create_session);
375
376void iscsi_remove_session(struct iscsi_cls_session *session)
368{ 377{
369 struct Scsi_Host *shost = iscsi_session_to_shost(session); 378 struct Scsi_Host *shost = iscsi_session_to_shost(session);
370 struct iscsi_host *ihost = shost->shost_data; 379 struct iscsi_host *ihost = shost->shost_data;
@@ -376,19 +385,88 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
376 list_del(&session->host_list); 385 list_del(&session->host_list);
377 mutex_unlock(&ihost->mutex); 386 mutex_unlock(&ihost->mutex);
378 387
388 scsi_remove_target(&session->dev);
389
379 transport_unregister_device(&session->dev); 390 transport_unregister_device(&session->dev);
380 device_unregister(&session->dev); 391 device_del(&session->dev);
381 return 0; 392}
393EXPORT_SYMBOL_GPL(iscsi_remove_session);
394
395void iscsi_free_session(struct iscsi_cls_session *session)
396{
397 put_device(&session->dev);
382} 398}
383 399
400EXPORT_SYMBOL_GPL(iscsi_free_session);
401
402/**
403 * iscsi_destroy_session - destroy iscsi session
404 * @session: iscsi_session
405 *
406 * Can be called by a LLD or iscsi_transport. There must not be
407 * any running connections.
408 **/
409int iscsi_destroy_session(struct iscsi_cls_session *session)
410{
411 iscsi_remove_session(session);
412 iscsi_free_session(session);
413 return 0;
414}
384EXPORT_SYMBOL_GPL(iscsi_destroy_session); 415EXPORT_SYMBOL_GPL(iscsi_destroy_session);
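
Editor's note: the old monolithic iscsi_create_session() is now split so a driver that learns the target id from its hardware can allocate first and register later. A sketch of both directions, using only the functions introduced above (the example_* names are hypothetical):

	static struct iscsi_cls_session *
	example_session_up(struct Scsi_Host *shost, struct iscsi_transport *tt,
			   unsigned int hw_target_id)
	{
		struct iscsi_cls_session *session;

		session = iscsi_alloc_session(shost, tt);
		if (!session)
			return NULL;

		/* ... consult firmware, fill in session->dd_data ... */

		if (iscsi_add_session(session, hw_target_id)) {
			iscsi_free_session(session);	/* drops the device ref */
			return NULL;
		}
		return session;
	}

	static void example_session_down(struct iscsi_cls_session *session)
	{
		iscsi_remove_session(session);	/* unregister + remove scsi target */
		iscsi_free_session(session);	/* final put_device() */
	}
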
385 416
417static void mempool_zone_destroy(struct mempool_zone *zp)
418{
419 mempool_destroy(zp->pool);
420 kfree(zp);
421}
422
423static void*
424mempool_zone_alloc_skb(gfp_t gfp_mask, void *pool_data)
425{
426 struct mempool_zone *zone = pool_data;
427
428 return alloc_skb(zone->size, gfp_mask);
429}
430
431static void
432mempool_zone_free_skb(void *element, void *pool_data)
433{
434 kfree_skb(element);
435}
436
437static struct mempool_zone *
438mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
439{
440 struct mempool_zone *zp;
441
442 zp = kzalloc(sizeof(*zp), GFP_KERNEL);
443 if (!zp)
444 return NULL;
445
446 zp->size = size;
447 zp->hiwat = hiwat;
448 INIT_LIST_HEAD(&zp->freequeue);
449 spin_lock_init(&zp->freelock);
450 atomic_set(&zp->allocated, 0);
451
452 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
453 mempool_zone_free_skb, zp);
454 if (!zp->pool) {
455 kfree(zp);
456 return NULL;
457 }
458
459 return zp;
460}
461
386static void iscsi_conn_release(struct device *dev) 462static void iscsi_conn_release(struct device *dev)
387{ 463{
388 struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); 464 struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
389 struct device *parent = conn->dev.parent; 465 struct device *parent = conn->dev.parent;
390 466
391 kfree(conn->persistent_address); 467 mempool_zone_destroy(conn->z_pdu);
468 mempool_zone_destroy(conn->z_error);
469
392 kfree(conn); 470 kfree(conn);
393 put_device(parent); 471 put_device(parent);
394} 472}
@@ -398,6 +476,31 @@ static int iscsi_is_conn_dev(const struct device *dev)
398 return dev->release == iscsi_conn_release; 476 return dev->release == iscsi_conn_release;
399} 477}
400 478
479static int iscsi_create_event_pools(struct iscsi_cls_conn *conn)
480{
481 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
482 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
483 sizeof(struct iscsi_hdr) +
484 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH),
485 Z_HIWAT_PDU);
486 if (!conn->z_pdu) {
487 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
488 "pdu zone for new conn\n");
489 return -ENOMEM;
490 }
491
492 conn->z_error = mempool_zone_init(Z_MAX_ERROR,
493 NLMSG_SPACE(sizeof(struct iscsi_uevent)),
494 Z_HIWAT_ERROR);
495 if (!conn->z_error) {
496 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
497 "error zone for new conn\n");
498 mempool_zone_destroy(conn->z_pdu);
499 return -ENOMEM;
500 }
501 return 0;
502}
503
401/** 504/**
402 * iscsi_create_conn - create iscsi class connection 505 * iscsi_create_conn - create iscsi class connection
403 * @session: iscsi cls session 506 * @session: iscsi cls session
@@ -430,9 +533,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
430 conn->transport = transport; 533 conn->transport = transport;
431 conn->cid = cid; 534 conn->cid = cid;
432 535
536 if (iscsi_create_event_pools(conn))
537 goto free_conn;
538
433 /* this is released in the dev's release function */ 539 /* this is released in the dev's release function */
434 if (!get_device(&session->dev)) 540 if (!get_device(&session->dev))
435 goto free_conn; 541 goto free_conn_pools;
436 542
437 snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u", 543 snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u",
438 session->sid, cid); 544 session->sid, cid);
@@ -449,6 +555,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
449 555
450release_parent_ref: 556release_parent_ref:
451 put_device(&session->dev); 557 put_device(&session->dev);
558free_conn_pools:
559
452free_conn: 560free_conn:
453 kfree(conn); 561 kfree(conn);
454 return NULL; 562 return NULL;
@@ -496,20 +604,6 @@ static inline struct list_head *skb_to_lh(struct sk_buff *skb)
496 return (struct list_head *)&skb->cb; 604 return (struct list_head *)&skb->cb;
497} 605}
498 606
499static void*
500mempool_zone_alloc_skb(gfp_t gfp_mask, void *pool_data)
501{
502 struct mempool_zone *zone = pool_data;
503
504 return alloc_skb(zone->size, gfp_mask);
505}
506
507static void
508mempool_zone_free_skb(void *element, void *pool_data)
509{
510 kfree_skb(element);
511}
512
513static void 607static void
514mempool_zone_complete(struct mempool_zone *zone) 608mempool_zone_complete(struct mempool_zone *zone)
515{ 609{
@@ -529,37 +623,6 @@ mempool_zone_complete(struct mempool_zone *zone)
529 spin_unlock_irqrestore(&zone->freelock, flags); 623 spin_unlock_irqrestore(&zone->freelock, flags);
530} 624}
531 625
532static struct mempool_zone *
533mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
534{
535 struct mempool_zone *zp;
536
537 zp = kzalloc(sizeof(*zp), GFP_KERNEL);
538 if (!zp)
539 return NULL;
540
541 zp->size = size;
542 zp->hiwat = hiwat;
543 INIT_LIST_HEAD(&zp->freequeue);
544 spin_lock_init(&zp->freelock);
545 atomic_set(&zp->allocated, 0);
546
547 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
548 mempool_zone_free_skb, zp);
549 if (!zp->pool) {
550 kfree(zp);
551 return NULL;
552 }
553
554 return zp;
555}
556
557static void mempool_zone_destroy(struct mempool_zone *zp)
558{
559 mempool_destroy(zp->pool);
560 kfree(zp);
561}
562
563static struct sk_buff* 626static struct sk_buff*
564mempool_zone_get_skb(struct mempool_zone *zone) 627mempool_zone_get_skb(struct mempool_zone *zone)
565{ 628{
@@ -572,6 +635,27 @@ mempool_zone_get_skb(struct mempool_zone *zone)
572} 635}
573 636
574static int 637static int
638iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp)
639{
640 unsigned long flags;
641 int rc;
642
643 skb_get(skb);
644 rc = netlink_broadcast(nls, skb, 0, 1, gfp);
645 if (rc < 0) {
646 mempool_free(skb, zone->pool);
647 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
648 return rc;
649 }
650
651 spin_lock_irqsave(&zone->freelock, flags);
652 INIT_LIST_HEAD(skb_to_lh(skb));
653 list_add(skb_to_lh(skb), &zone->freequeue);
654 spin_unlock_irqrestore(&zone->freelock, flags);
655 return 0;
656}
657
658static int
575iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb, int pid) 659iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb, int pid)
576{ 660{
577 unsigned long flags; 661 unsigned long flags;
@@ -666,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
666 ev->r.connerror.cid = conn->cid; 750 ev->r.connerror.cid = conn->cid;
667 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 751 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
668 752
669 iscsi_unicast_skb(conn->z_error, skb, priv->daemon_pid); 753 iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC);
670 754
671 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 755 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
672 error); 756 error);
@@ -767,6 +851,131 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
767 return err; 851 return err;
768} 852}
769 853
854/**
855 * iscsi_if_destroy_session_done - send session destr. completion event
856 * @conn: last connection for session
857 *
858 * This is called by HW iscsi LLDs to notify userspace that its HW has
859 * removed a session.
860 **/
861int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
862{
863 struct iscsi_internal *priv;
864 struct iscsi_cls_session *session;
865 struct Scsi_Host *shost;
866 struct iscsi_uevent *ev;
867 struct sk_buff *skb;
868 struct nlmsghdr *nlh;
869 unsigned long flags;
870 int rc, len = NLMSG_SPACE(sizeof(*ev));
871
872 priv = iscsi_if_transport_lookup(conn->transport);
873 if (!priv)
874 return -EINVAL;
875
876 session = iscsi_dev_to_session(conn->dev.parent);
877 shost = iscsi_session_to_shost(session);
878
879 mempool_zone_complete(conn->z_pdu);
880
881 skb = mempool_zone_get_skb(conn->z_pdu);
882 if (!skb) {
883 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
884 "session creation event\n");
885 return -ENOMEM;
886 }
887
888 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
889 ev = NLMSG_DATA(nlh);
890 ev->transport_handle = iscsi_handle(conn->transport);
891 ev->type = ISCSI_KEVENT_DESTROY_SESSION;
892 ev->r.d_session.host_no = shost->host_no;
893 ev->r.d_session.sid = session->sid;
894
895 /*
896 * this will occur if the daemon is not up, so we just warn
897 * the user and when the daemon is restarted it will handle it
898 */
899 rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
900 if (rc < 0)
901 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
902 "session destruction event. Check iscsi daemon\n");
903
904 spin_lock_irqsave(&sesslock, flags);
905 list_del(&session->sess_list);
906 spin_unlock_irqrestore(&sesslock, flags);
907
908 spin_lock_irqsave(&connlock, flags);
909 conn->active = 0;
910 list_del(&conn->conn_list);
911 spin_unlock_irqrestore(&connlock, flags);
912
913 return rc;
914}
915EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
916
917/**
918 * iscsi_if_create_session_done - send session creation completion event
919 * @conn: leading connection for session
920 *
921 * This is called by HW iscsi LLDs to notify userspace that its HW has
922 * created a session or an existing session is back in the logged-in state.
923 **/
924int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
925{
926 struct iscsi_internal *priv;
927 struct iscsi_cls_session *session;
928 struct Scsi_Host *shost;
929 struct iscsi_uevent *ev;
930 struct sk_buff *skb;
931 struct nlmsghdr *nlh;
932 unsigned long flags;
933 int rc, len = NLMSG_SPACE(sizeof(*ev));
934
935 priv = iscsi_if_transport_lookup(conn->transport);
936 if (!priv)
937 return -EINVAL;
938
939 session = iscsi_dev_to_session(conn->dev.parent);
940 shost = iscsi_session_to_shost(session);
941
942 mempool_zone_complete(conn->z_pdu);
943
944 skb = mempool_zone_get_skb(conn->z_pdu);
945 if (!skb) {
946 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
947 "session creation event\n");
948 return -ENOMEM;
949 }
950
951 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
952 ev = NLMSG_DATA(nlh);
953 ev->transport_handle = iscsi_handle(conn->transport);
954 ev->type = ISCSI_UEVENT_CREATE_SESSION;
955 ev->r.c_session_ret.host_no = shost->host_no;
956 ev->r.c_session_ret.sid = session->sid;
957
958 /*
959 * this will occur if the daemon is not up, so we just warn
960 * the user and when the daemon is restarted it will handle it
961 */
962 rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
963 if (rc < 0)
964 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
965 "session creation event. Check iscsi daemon\n");
966
967 spin_lock_irqsave(&sesslock, flags);
968 list_add(&session->sess_list, &sesslist);
969 spin_unlock_irqrestore(&sesslock, flags);
970
971 spin_lock_irqsave(&connlock, flags);
972 list_add(&conn->conn_list, &connlist);
973 conn->active = 1;
974 spin_unlock_irqrestore(&connlock, flags);
975 return rc;
976}
977EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
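
Editor's note: for a HW iSCSI LLD the pairing is symmetric — call iscsi_if_create_session_done() once firmware reports a login (or relogin), and iscsi_if_destroy_session_done() when the hardware tears the session down. A hypothetical firmware-event handler:

	static void example_fw_session_event(struct iscsi_cls_conn *conn, int up)
	{
		if (up)
			iscsi_if_create_session_done(conn);
		else
			iscsi_if_destroy_session_done(conn);
	}
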
978
770static int 979static int
771iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 980iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
772{ 981{
@@ -812,26 +1021,6 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
812 return -ENOMEM; 1021 return -ENOMEM;
813 } 1022 }
814 1023
815 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
816 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
817 sizeof(struct iscsi_hdr) +
818 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH),
819 Z_HIWAT_PDU);
820 if (!conn->z_pdu) {
821 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
822 "pdu zone for new conn\n");
823 goto destroy_conn;
824 }
825
826 conn->z_error = mempool_zone_init(Z_MAX_ERROR,
827 NLMSG_SPACE(sizeof(struct iscsi_uevent)),
828 Z_HIWAT_ERROR);
829 if (!conn->z_error) {
830 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
831 "error zone for new conn\n");
832 goto free_pdu_pool;
833 }
834
835 ev->r.c_conn_ret.sid = session->sid; 1024 ev->r.c_conn_ret.sid = session->sid;
836 ev->r.c_conn_ret.cid = conn->cid; 1025 ev->r.c_conn_ret.cid = conn->cid;
837 1026
@@ -841,13 +1030,6 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
841 spin_unlock_irqrestore(&connlock, flags); 1030 spin_unlock_irqrestore(&connlock, flags);
842 1031
843 return 0; 1032 return 0;
844
845free_pdu_pool:
846 mempool_zone_destroy(conn->z_pdu);
847destroy_conn:
848 if (transport->destroy_conn)
849 transport->destroy_conn(conn->dd_data);
850 return -ENOMEM;
851} 1033}
852 1034
853static int 1035static int
@@ -855,7 +1037,6 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
855{ 1037{
856 unsigned long flags; 1038 unsigned long flags;
857 struct iscsi_cls_conn *conn; 1039 struct iscsi_cls_conn *conn;
858 struct mempool_zone *z_error, *z_pdu;
859 1040
860 conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); 1041 conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
861 if (!conn) 1042 if (!conn)
@@ -865,35 +1046,18 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
865 list_del(&conn->conn_list); 1046 list_del(&conn->conn_list);
866 spin_unlock_irqrestore(&connlock, flags); 1047 spin_unlock_irqrestore(&connlock, flags);
867 1048
868 z_pdu = conn->z_pdu;
869 z_error = conn->z_error;
870
871 if (transport->destroy_conn) 1049 if (transport->destroy_conn)
872 transport->destroy_conn(conn); 1050 transport->destroy_conn(conn);
873
874 mempool_zone_destroy(z_pdu);
875 mempool_zone_destroy(z_error);
876
877 return 0; 1051 return 0;
878} 1052}
879 1053
880static void
881iscsi_copy_param(struct iscsi_uevent *ev, uint32_t *value, char *data)
882{
883 if (ev->u.set_param.len != sizeof(uint32_t))
884 BUG();
885 memcpy(value, data, min_t(uint32_t, sizeof(uint32_t),
886 ev->u.set_param.len));
887}
888
889static int 1054static int
890iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) 1055iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
891{ 1056{
892 char *data = (char*)ev + sizeof(*ev); 1057 char *data = (char*)ev + sizeof(*ev);
893 struct iscsi_cls_conn *conn; 1058 struct iscsi_cls_conn *conn;
894 struct iscsi_cls_session *session; 1059 struct iscsi_cls_session *session;
895 int err = 0; 1060 int err = 0, value = 0;
896 uint32_t value = 0;
897 1061
898 session = iscsi_session_lookup(ev->u.set_param.sid); 1062 session = iscsi_session_lookup(ev->u.set_param.sid);
899 conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); 1063 conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
@@ -902,42 +1066,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
902 1066
903 switch (ev->u.set_param.param) { 1067 switch (ev->u.set_param.param) {
904 case ISCSI_PARAM_SESS_RECOVERY_TMO: 1068 case ISCSI_PARAM_SESS_RECOVERY_TMO:
905 iscsi_copy_param(ev, &value, data); 1069 sscanf(data, "%d", &value);
906 if (value != 0) 1070 if (value != 0)
907 session->recovery_tmo = value; 1071 session->recovery_tmo = value;
908 break; 1072 break;
909 case ISCSI_PARAM_TARGET_NAME:
910 /* this should not change between logins */
911 if (session->targetname)
912 return 0;
913
914 session->targetname = kstrdup(data, GFP_KERNEL);
915 if (!session->targetname)
916 return -ENOMEM;
917 break;
918 case ISCSI_PARAM_TPGT:
919 iscsi_copy_param(ev, &value, data);
920 session->tpgt = value;
921 break;
922 case ISCSI_PARAM_PERSISTENT_PORT:
923 iscsi_copy_param(ev, &value, data);
924 conn->persistent_port = value;
925 break;
926 case ISCSI_PARAM_PERSISTENT_ADDRESS:
927 /*
928 * this is the address returned in discovery so it should
929 * not change between logins.
930 */
931 if (conn->persistent_address)
932 return 0;
933
934 conn->persistent_address = kstrdup(data, GFP_KERNEL);
935 if (!conn->persistent_address)
936 return -ENOMEM;
937 break;
938 default: 1073 default:
939 iscsi_copy_param(ev, &value, data); 1074 err = transport->set_param(conn, ev->u.set_param.param,
940 err = transport->set_param(conn, ev->u.set_param.param, value); 1075 data, ev->u.set_param.len);
941 } 1076 }
942 1077
943 return err; 1078 return err;
@@ -978,6 +1113,21 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
978} 1113}
979 1114
980static int 1115static int
1116iscsi_tgt_dscvr(struct iscsi_transport *transport,
1117 struct iscsi_uevent *ev)
1118{
1119 struct sockaddr *dst_addr;
1120
1121 if (!transport->tgt_dscvr)
1122 return -EINVAL;
1123
1124 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1125 return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
1126 ev->u.tgt_dscvr.host_no,
1127 ev->u.tgt_dscvr.enable, dst_addr);
1128}
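
Editor's note: a transport that wants ISCSI_UEVENT_TGT_DSCVR supplies a tgt_dscvr callback. The argument order below is taken from the call site above; the enum iscsi_tgt_dscvr type and its SEND_TARGETS value are assumed to come from iscsi_if.h, and example_tgt_dscvr is a sketch, not an in-tree driver:

	static int example_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
				     uint32_t enable, struct sockaddr *dst_addr)
	{
		switch (type) {
		case ISCSI_TGT_DSCVR_SEND_TARGETS:
			/* have the HBA run SendTargets discovery against dst_addr */
			return 0;
		default:
			return -ENOSYS;
		}
	}
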
1129
1130static int
981iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1131iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
982{ 1132{
983 int err = 0; 1133 int err = 0;
@@ -1065,6 +1215,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1065 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1215 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1066 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1216 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1067 break; 1217 break;
1218 case ISCSI_UEVENT_TGT_DSCVR:
1219 err = iscsi_tgt_dscvr(transport, ev);
1220 break;
1068 default: 1221 default:
1069 err = -EINVAL; 1222 err = -EINVAL;
1070 break; 1223 break;
@@ -1147,49 +1300,31 @@ struct class_device_attribute class_device_attr_##_prefix##_##_name = \
1147/* 1300/*
1148 * iSCSI connection attrs 1301 * iSCSI connection attrs
1149 */ 1302 */
1150#define iscsi_conn_int_attr_show(param, format) \ 1303#define iscsi_conn_attr_show(param) \
1151static ssize_t \
1152show_conn_int_param_##param(struct class_device *cdev, char *buf) \
1153{ \
1154 uint32_t value = 0; \
1155 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
1156 struct iscsi_transport *t = conn->transport; \
1157 \
1158 t->get_conn_param(conn, param, &value); \
1159 return snprintf(buf, 20, format"\n", value); \
1160}
1161
1162#define iscsi_conn_int_attr(field, param, format) \
1163 iscsi_conn_int_attr_show(param, format) \
1164static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_int_param_##param, \
1165 NULL);
1166
1167iscsi_conn_int_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH, "%u");
1168iscsi_conn_int_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH, "%u");
1169iscsi_conn_int_attr(header_digest, ISCSI_PARAM_HDRDGST_EN, "%d");
1170iscsi_conn_int_attr(data_digest, ISCSI_PARAM_DATADGST_EN, "%d");
1171iscsi_conn_int_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN, "%d");
1172iscsi_conn_int_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN, "%d");
1173iscsi_conn_int_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT, "%d");
1174iscsi_conn_int_attr(port, ISCSI_PARAM_CONN_PORT, "%d");
1175iscsi_conn_int_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN, "%u");
1176
1177#define iscsi_conn_str_attr_show(param) \
1178static ssize_t \ 1304static ssize_t \
1179show_conn_str_param_##param(struct class_device *cdev, char *buf) \ 1305show_conn_param_##param(struct class_device *cdev, char *buf) \
1180{ \ 1306{ \
1181 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 1307 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
1182 struct iscsi_transport *t = conn->transport; \ 1308 struct iscsi_transport *t = conn->transport; \
1183 return t->get_conn_str_param(conn, param, buf); \ 1309 return t->get_conn_param(conn, param, buf); \
1184} 1310}
1185 1311
1186#define iscsi_conn_str_attr(field, param) \ 1312#define iscsi_conn_attr(field, param) \
1187 iscsi_conn_str_attr_show(param) \ 1313 iscsi_conn_attr_show(param) \
1188static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_str_param_##param, \ 1314static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \
1189 NULL); 1315 NULL);
1190 1316
1191iscsi_conn_str_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); 1317iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH);
1192iscsi_conn_str_attr(address, ISCSI_PARAM_CONN_ADDRESS); 1318iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH);
1319iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN);
1320iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
1321iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
1322iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
1323iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
1324iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
1325iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
1326iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
1327iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
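
Editor's note: with the int/string attribute split gone, a transport now backs all of these with one get_conn_param() that formats straight into the sysfs buffer, matching the t->get_conn_param(conn, param, buf) call in the show macro above. A minimal sketch; struct example_conn and its fields are hypothetical:

	struct example_conn {
		uint16_t	portal_port;
		char		portal_address[64];
	};

	static int example_get_conn_param(struct iscsi_cls_conn *conn,
					  enum iscsi_param param, char *buf)
	{
		struct example_conn *ec = conn->dd_data;

		switch (param) {
		case ISCSI_PARAM_CONN_PORT:
			return sprintf(buf, "%u\n", ec->portal_port);
		case ISCSI_PARAM_CONN_ADDRESS:
			return sprintf(buf, "%s\n", ec->portal_address);
		default:
			return -ENOSYS;
		}
	}
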
1193 1328
1194#define iscsi_cdev_to_session(_cdev) \ 1329#define iscsi_cdev_to_session(_cdev) \
1195 iscsi_dev_to_session(_cdev->dev) 1330 iscsi_dev_to_session(_cdev->dev)
@@ -1197,61 +1332,36 @@ iscsi_conn_str_attr(address, ISCSI_PARAM_CONN_ADDRESS);
1197/* 1332/*
1198 * iSCSI session attrs 1333 * iSCSI session attrs
1199 */ 1334 */
1200#define iscsi_session_int_attr_show(param, format) \ 1335#define iscsi_session_attr_show(param) \
1201static ssize_t \
1202show_session_int_param_##param(struct class_device *cdev, char *buf) \
1203{ \
1204 uint32_t value = 0; \
1205 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1206 struct iscsi_transport *t = session->transport; \
1207 \
1208 t->get_session_param(session, param, &value); \
1209 return snprintf(buf, 20, format"\n", value); \
1210}
1211
1212#define iscsi_session_int_attr(field, param, format) \
1213 iscsi_session_int_attr_show(param, format) \
1214static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_int_param_##param, \
1215 NULL);
1216
1217iscsi_session_int_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, "%d");
1218iscsi_session_int_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, "%hu");
1219iscsi_session_int_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, "%d");
1220iscsi_session_int_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, "%u");
1221iscsi_session_int_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, "%u");
1222iscsi_session_int_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, "%d");
1223iscsi_session_int_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, "%d");
1224iscsi_session_int_attr(erl, ISCSI_PARAM_ERL, "%d");
1225iscsi_session_int_attr(tpgt, ISCSI_PARAM_TPGT, "%d");
1226
1227#define iscsi_session_str_attr_show(param) \
1228static ssize_t \ 1336static ssize_t \
1229show_session_str_param_##param(struct class_device *cdev, char *buf) \ 1337show_session_param_##param(struct class_device *cdev, char *buf) \
1230{ \ 1338{ \
1231 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1339 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1232 struct iscsi_transport *t = session->transport; \ 1340 struct iscsi_transport *t = session->transport; \
1233 return t->get_session_str_param(session, param, buf); \ 1341 return t->get_session_param(session, param, buf); \
1234} 1342}
1235 1343
1236#define iscsi_session_str_attr(field, param) \ 1344#define iscsi_session_attr(field, param) \
1237 iscsi_session_str_attr_show(param) \ 1345 iscsi_session_attr_show(param) \
1238static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_str_param_##param, \ 1346static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
1239 NULL); 1347 NULL);
1240 1348
1241iscsi_session_str_attr(targetname, ISCSI_PARAM_TARGET_NAME); 1349iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
1350iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
1351iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
1352iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
1353iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
1354iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
1355iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
1356iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
1357iscsi_session_attr(erl, ISCSI_PARAM_ERL);
1358iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
1242 1359
1243/*
1244 * Private session and conn attrs. userspace uses several iscsi values
1245 * to identify each session between reboots. Some of these values may not
1246 * be present in the iscsi_transport/LLD driver becuase userspace handles
1247 * login (and failback for login redirect) so for these type of drivers
1248 * the class manages the attrs and values for the iscsi_transport/LLD
1249 */
1250#define iscsi_priv_session_attr_show(field, format) \ 1360#define iscsi_priv_session_attr_show(field, format) \
1251static ssize_t \ 1361static ssize_t \
1252show_priv_session_##field(struct class_device *cdev, char *buf) \ 1362show_priv_session_##field(struct class_device *cdev, char *buf) \
1253{ \ 1363{ \
1254 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1364 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
1255 return sprintf(buf, format"\n", session->field); \ 1365 return sprintf(buf, format"\n", session->field); \
1256} 1366}
1257 1367
@@ -1259,31 +1369,15 @@ show_priv_session_##field(struct class_device *cdev, char *buf) \
1259 iscsi_priv_session_attr_show(field, format) \ 1369 iscsi_priv_session_attr_show(field, format) \
1260static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \ 1370static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
1261 NULL) 1371 NULL)
1262iscsi_priv_session_attr(targetname, "%s");
1263iscsi_priv_session_attr(tpgt, "%d");
1264iscsi_priv_session_attr(recovery_tmo, "%d"); 1372iscsi_priv_session_attr(recovery_tmo, "%d");
1265 1373
1266#define iscsi_priv_conn_attr_show(field, format) \
1267static ssize_t \
1268show_priv_conn_##field(struct class_device *cdev, char *buf) \
1269{ \
1270 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
1271 return sprintf(buf, format"\n", conn->field); \
1272}
1273
1274#define iscsi_priv_conn_attr(field, format) \
1275 iscsi_priv_conn_attr_show(field, format) \
1276static ISCSI_CLASS_ATTR(priv_conn, field, S_IRUGO, show_priv_conn_##field, \
1277 NULL)
1278iscsi_priv_conn_attr(persistent_address, "%s");
1279iscsi_priv_conn_attr(persistent_port, "%d");
1280
1281#define SETUP_PRIV_SESSION_RD_ATTR(field) \ 1374#define SETUP_PRIV_SESSION_RD_ATTR(field) \
1282do { \ 1375do { \
1283 priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \ 1376 priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
1284 count++; \ 1377 count++; \
1285} while (0) 1378} while (0)
1286 1379
1380
1287#define SETUP_SESSION_RD_ATTR(field, param_flag) \ 1381#define SETUP_SESSION_RD_ATTR(field, param_flag) \
1288do { \ 1382do { \
1289 if (tt->param_mask & param_flag) { \ 1383 if (tt->param_mask & param_flag) { \
@@ -1292,12 +1386,6 @@ do { \
1292 } \ 1386 } \
1293} while (0) 1387} while (0)
1294 1388
1295#define SETUP_PRIV_CONN_RD_ATTR(field) \
1296do { \
1297 priv->conn_attrs[count] = &class_device_attr_priv_conn_##field; \
1298 count++; \
1299} while (0)
1300
1301#define SETUP_CONN_RD_ATTR(field, param_flag) \ 1389#define SETUP_CONN_RD_ATTR(field, param_flag) \
1302do { \ 1390do { \
1303 if (tt->param_mask & param_flag) { \ 1391 if (tt->param_mask & param_flag) { \
@@ -1388,6 +1476,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1388 if (!priv) 1476 if (!priv)
1389 return NULL; 1477 return NULL;
1390 INIT_LIST_HEAD(&priv->list); 1478 INIT_LIST_HEAD(&priv->list);
1479 priv->daemon_pid = -1;
1391 priv->iscsi_transport = tt; 1480 priv->iscsi_transport = tt;
1392 priv->t.user_scan = iscsi_user_scan; 1481 priv->t.user_scan = iscsi_user_scan;
1393 1482
@@ -1424,16 +1513,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
1424 SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS); 1513 SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS);
1425 SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT); 1514 SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT);
1426 SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN); 1515 SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
1427 1516 SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
1428 if (tt->param_mask & ISCSI_PERSISTENT_ADDRESS) 1517 SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
1429 SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
1430 else
1431 SETUP_PRIV_CONN_RD_ATTR(persistent_address);
1432
1433 if (tt->param_mask & ISCSI_PERSISTENT_PORT)
1434 SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
1435 else
1436 SETUP_PRIV_CONN_RD_ATTR(persistent_port);
1437 1518
1438 BUG_ON(count > ISCSI_CONN_ATTRS); 1519 BUG_ON(count > ISCSI_CONN_ATTRS);
1439 priv->conn_attrs[count] = NULL; 1520 priv->conn_attrs[count] = NULL;
@@ -1453,18 +1534,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
1453 SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN); 1534 SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN);
1454 SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN); 1535 SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN);
1455 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL); 1536 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
1537 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
1538 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
1456 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1539 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
1457 1540
1458 if (tt->param_mask & ISCSI_TARGET_NAME)
1459 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
1460 else
1461 SETUP_PRIV_SESSION_RD_ATTR(targetname);
1462
1463 if (tt->param_mask & ISCSI_TPGT)
1464 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
1465 else
1466 SETUP_PRIV_SESSION_RD_ATTR(tpgt);
1467
1468 BUG_ON(count > ISCSI_SESSION_ATTRS); 1541 BUG_ON(count > ISCSI_SESSION_ATTRS);
1469 priv->session_attrs[count] = NULL; 1542 priv->session_attrs[count] = NULL;
1470 1543
@@ -1541,6 +1614,9 @@ static __init int iscsi_transport_init(void)
1541{ 1614{
1542 int err; 1615 int err;
1543 1616
1617 printk(KERN_INFO "Loading iSCSI transport class v%s.",
1618 ISCSI_TRANSPORT_VERSION);
1619
1544 err = class_register(&iscsi_transport_class); 1620 err = class_register(&iscsi_transport_class);
1545 if (err) 1621 if (err)
1546 return err; 1622 return err;
@@ -1606,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
1606 "Alex Aizman <itn780@yahoo.com>"); 1682 "Alex Aizman <itn780@yahoo.com>");
1607MODULE_DESCRIPTION("iSCSI Transport Interface"); 1683MODULE_DESCRIPTION("iSCSI Transport Interface");
1608MODULE_LICENSE("GPL"); 1684MODULE_LICENSE("GPL");
1685MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1fe6b2d01853..b5b0c2cba96b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -41,6 +41,7 @@ struct sas_host_attrs {
41 struct mutex lock; 41 struct mutex lock;
42 u32 next_target_id; 42 u32 next_target_id;
43 u32 next_expander_id; 43 u32 next_expander_id;
44 int next_port_id;
44}; 45};
45#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) 46#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
46 47
@@ -76,6 +77,24 @@ get_sas_##title##_names(u32 table_key, char *buf) \
76 return len; \ 77 return len; \
77} 78}
78 79
80#define sas_bitfield_name_set(title, table) \
81static ssize_t \
82set_sas_##title##_names(u32 *table_key, const char *buf) \
83{ \
84 ssize_t len = 0; \
85 int i; \
86 \
87 for (i = 0; i < ARRAY_SIZE(table); i++) { \
88 len = strlen(table[i].name); \
89 if (strncmp(buf, table[i].name, len) == 0 && \
90 (buf[len] == '\n' || buf[len] == '\0')) { \
91 *table_key = table[i].value; \
92 return 0; \
93 } \
94 } \
95 return -EINVAL; \
96}
97
79#define sas_bitfield_name_search(title, table) \ 98#define sas_bitfield_name_search(title, table) \
80static ssize_t \ 99static ssize_t \
81get_sas_##title##_names(u32 table_key, char *buf) \ 100get_sas_##title##_names(u32 table_key, char *buf) \
@@ -130,7 +149,7 @@ static struct {
130 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, 149 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
131}; 150};
132sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 151sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
133 152sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
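
Editor's note: sas_bitfield_name_set() generates the inverse of the _search() helper — it matches a whole name from the table (terminated by '\n' or '\0') and writes back the corresponding value. For the linkspeed table instantiated here, a sysfs write reduces to roughly this (sketch only):

	u32 rate;

	/* "echo '3.0 Gbit' > .../minimum_linkrate" ends up doing: */
	if (set_sas_linkspeed_names(&rate, "3.0 Gbit\n") == 0) {
		/* rate == SAS_LINK_RATE_3_0_GBPS */
	}
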
134 153
135/* 154/*
136 * SAS host attributes 155 * SAS host attributes
@@ -146,6 +165,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
146 mutex_init(&sas_host->lock); 165 mutex_init(&sas_host->lock);
147 sas_host->next_target_id = 0; 166 sas_host->next_target_id = 0;
148 sas_host->next_expander_id = 0; 167 sas_host->next_expander_id = 0;
168 sas_host->next_port_id = 0;
149 return 0; 169 return 0;
150} 170}
151 171
@@ -174,12 +194,29 @@ static int sas_host_match(struct attribute_container *cont,
174 194
175static int do_sas_phy_delete(struct device *dev, void *data) 195static int do_sas_phy_delete(struct device *dev, void *data)
176{ 196{
177 if (scsi_is_sas_phy(dev)) 197 int pass = (int)(unsigned long)data;
198
199 if (pass == 0 && scsi_is_sas_port(dev))
200 sas_port_delete(dev_to_sas_port(dev));
201 else if (pass == 1 && scsi_is_sas_phy(dev))
178 sas_phy_delete(dev_to_phy(dev)); 202 sas_phy_delete(dev_to_phy(dev));
179 return 0; 203 return 0;
180} 204}
181 205
182/** 206/**
207 * sas_remove_children -- tear down a device's SAS data structures
208 * @dev: device belonging to the sas object
209 *
210 * Removes all SAS PHYs and remote PHYs for a given object
211 */
212void sas_remove_children(struct device *dev)
213{
214 device_for_each_child(dev, (void *)0, do_sas_phy_delete);
215 device_for_each_child(dev, (void *)1, do_sas_phy_delete);
216}
217EXPORT_SYMBOL(sas_remove_children);
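
Editor's note: sas_remove_children() exists so the two-pass delete (ports first, then phys) can be reused on any SAS object, not just the host. For instance, a hypothetical expander-removal path might look like this; example_expander_gone is a sketch, while sas_remove_children() and sas_rphy_delete() are real:

	static void example_expander_gone(struct sas_rphy *rphy)
	{
		sas_remove_children(&rphy->dev);	/* ports first, then phys */
		sas_rphy_delete(rphy);
	}
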
218
219/**
183 * sas_remove_host -- tear down a Scsi_Host's SAS data structures 220 * sas_remove_host -- tear down a Scsi_Host's SAS data structures
184 * @shost: Scsi Host that is torn down 221 * @shost: Scsi Host that is torn down
185 * 222 *
@@ -188,13 +225,13 @@ static int do_sas_phy_delete(struct device *dev, void *data)
188 */ 225 */
189void sas_remove_host(struct Scsi_Host *shost) 226void sas_remove_host(struct Scsi_Host *shost)
190{ 227{
191 device_for_each_child(&shost->shost_gendev, NULL, do_sas_phy_delete); 228 sas_remove_children(&shost->shost_gendev);
192} 229}
193EXPORT_SYMBOL(sas_remove_host); 230EXPORT_SYMBOL(sas_remove_host);
194 231
195 232
196/* 233/*
197 * SAS Port attributes 234 * SAS Phy attributes
198 */ 235 */
199 236
200#define sas_phy_show_simple(field, name, format_string, cast) \ 237#define sas_phy_show_simple(field, name, format_string, cast) \
@@ -234,10 +271,39 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \
234 return get_sas_linkspeed_names(phy->field, buf); \ 271 return get_sas_linkspeed_names(phy->field, buf); \
235} 272}
236 273
274/* Fudge to tell if we're minimum or maximum */
275#define sas_phy_store_linkspeed(field) \
276static ssize_t \
277store_sas_phy_##field(struct class_device *cdev, const char *buf, \
278 size_t count) \
279{ \
280 struct sas_phy *phy = transport_class_to_phy(cdev); \
281 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \
282 struct sas_internal *i = to_sas_internal(shost->transportt); \
283 u32 value; \
284 struct sas_phy_linkrates rates = {0}; \
285 int error; \
286 \
287 error = set_sas_linkspeed_names(&value, buf); \
288 if (error) \
289 return error; \
290 rates.field = value; \
291 error = i->f->set_phy_speed(phy, &rates); \
292 \
293 return error ? error : count; \
294}
295
296#define sas_phy_linkspeed_rw_attr(field) \
297 sas_phy_show_linkspeed(field) \
298 sas_phy_store_linkspeed(field) \
299static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \
300 store_sas_phy_##field)
301
237#define sas_phy_linkspeed_attr(field) \ 302#define sas_phy_linkspeed_attr(field) \
238 sas_phy_show_linkspeed(field) \ 303 sas_phy_show_linkspeed(field) \
239static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) 304static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)
240 305
306
241#define sas_phy_show_linkerror(field) \ 307#define sas_phy_show_linkerror(field) \
242static ssize_t \ 308static ssize_t \
243show_sas_phy_##field(struct class_device *cdev, char *buf) \ 309show_sas_phy_##field(struct class_device *cdev, char *buf) \
@@ -247,9 +313,6 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \
247 struct sas_internal *i = to_sas_internal(shost->transportt); \ 313 struct sas_internal *i = to_sas_internal(shost->transportt); \
248 int error; \ 314 int error; \
249 \ 315 \
250 if (!phy->local_attached) \
251 return -EINVAL; \
252 \
253 error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \ 316 error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \
254 if (error) \ 317 if (error) \
255 return error; \ 318 return error; \
@@ -280,9 +343,6 @@ static ssize_t do_sas_phy_reset(struct class_device *cdev,
280 struct sas_internal *i = to_sas_internal(shost->transportt); 343 struct sas_internal *i = to_sas_internal(shost->transportt);
281 int error; 344 int error;
282 345
283 if (!phy->local_attached)
284 return -EINVAL;
285
286 error = i->f->phy_reset(phy, hard_reset); 346 error = i->f->phy_reset(phy, hard_reset);
287 if (error) 347 if (error)
288 return error; 348 return error;
@@ -310,12 +370,12 @@ sas_phy_protocol_attr(identify.target_port_protocols,
310sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 370sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
311 unsigned long long); 371 unsigned long long);
312sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 372sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
313sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); 373//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
314sas_phy_linkspeed_attr(negotiated_linkrate); 374sas_phy_linkspeed_attr(negotiated_linkrate);
315sas_phy_linkspeed_attr(minimum_linkrate_hw); 375sas_phy_linkspeed_attr(minimum_linkrate_hw);
316sas_phy_linkspeed_attr(minimum_linkrate); 376sas_phy_linkspeed_rw_attr(minimum_linkrate);
317sas_phy_linkspeed_attr(maximum_linkrate_hw); 377sas_phy_linkspeed_attr(maximum_linkrate_hw);
318sas_phy_linkspeed_attr(maximum_linkrate); 378sas_phy_linkspeed_rw_attr(maximum_linkrate);
319sas_phy_linkerror_attr(invalid_dword_count); 379sas_phy_linkerror_attr(invalid_dword_count);
320sas_phy_linkerror_attr(running_disparity_error_count); 380sas_phy_linkerror_attr(running_disparity_error_count);
321sas_phy_linkerror_attr(loss_of_dword_sync_count); 381sas_phy_linkerror_attr(loss_of_dword_sync_count);
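
Editor's note: the new rw linkspeed variants route writes through i->f->set_phy_speed() with exactly one of the two rate fields set (the "fudge" noted above). A sketch of the LLD side; the field layout of struct sas_phy_linkrates is assumed from the store macro's usage, and example_set_phy_speed is hypothetical:

	static int example_set_phy_speed(struct sas_phy *phy,
					 struct sas_phy_linkrates *rates)
	{
		/* only the field being stored is non-zero, per the store macro */
		if (rates->minimum_linkrate &&
		    rates->minimum_linkrate > phy->maximum_linkrate_hw)
			return -EINVAL;

		/* ... program the phy and trigger link renegotiation ... */
		return 0;
	}
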
@@ -378,9 +438,10 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
378 device_initialize(&phy->dev); 438 device_initialize(&phy->dev);
379 phy->dev.parent = get_device(parent); 439 phy->dev.parent = get_device(parent);
380 phy->dev.release = sas_phy_release; 440 phy->dev.release = sas_phy_release;
441 INIT_LIST_HEAD(&phy->port_siblings);
381 if (scsi_is_sas_expander_device(parent)) { 442 if (scsi_is_sas_expander_device(parent)) {
382 struct sas_rphy *rphy = dev_to_rphy(parent); 443 struct sas_rphy *rphy = dev_to_rphy(parent);
383 sprintf(phy->dev.bus_id, "phy-%d-%d:%d", shost->host_no, 444 sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no,
384 rphy->scsi_target_id, number); 445 rphy->scsi_target_id, number);
385 } else 446 } else
386 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number); 447 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number);
@@ -440,8 +501,8 @@ sas_phy_delete(struct sas_phy *phy)
440{ 501{
441 struct device *dev = &phy->dev; 502 struct device *dev = &phy->dev;
442 503
443 if (phy->rphy) 504 /* this happens if the phy is still part of a port when deleted */
444 sas_rphy_delete(phy->rphy); 505 BUG_ON(!list_empty(&phy->port_siblings));
445 506
446 transport_remove_device(dev); 507 transport_remove_device(dev);
447 device_del(dev); 508 device_del(dev);
@@ -464,6 +525,310 @@ int scsi_is_sas_phy(const struct device *dev)
464EXPORT_SYMBOL(scsi_is_sas_phy); 525EXPORT_SYMBOL(scsi_is_sas_phy);
465 526
466/* 527/*
528 * SAS Port attributes
529 */
530#define sas_port_show_simple(field, name, format_string, cast) \
531static ssize_t \
532show_sas_port_##name(struct class_device *cdev, char *buf) \
533{ \
534 struct sas_port *port = transport_class_to_sas_port(cdev); \
535 \
536 return snprintf(buf, 20, format_string, cast port->field); \
537}
538
539#define sas_port_simple_attr(field, name, format_string, type) \
540 sas_port_show_simple(field, name, format_string, (type)) \
541static CLASS_DEVICE_ATTR(name, S_IRUGO, show_sas_port_##name, NULL)
542
543sas_port_simple_attr(num_phys, num_phys, "%d\n", int);
544
545static DECLARE_TRANSPORT_CLASS(sas_port_class,
546 "sas_port", NULL, NULL, NULL);
547
548static int sas_port_match(struct attribute_container *cont, struct device *dev)
549{
550 struct Scsi_Host *shost;
551 struct sas_internal *i;
552
553 if (!scsi_is_sas_port(dev))
554 return 0;
555 shost = dev_to_shost(dev->parent);
556
557 if (!shost->transportt)
558 return 0;
559 if (shost->transportt->host_attrs.ac.class !=
560 &sas_host_class.class)
561 return 0;
562
563 i = to_sas_internal(shost->transportt);
564 return &i->port_attr_cont.ac == cont;
565}
566
567
568static void sas_port_release(struct device *dev)
569{
570 struct sas_port *port = dev_to_sas_port(dev);
571
572 BUG_ON(!list_empty(&port->phy_list));
573
574 put_device(dev->parent);
575 kfree(port);
576}
577
578static void sas_port_create_link(struct sas_port *port,
579 struct sas_phy *phy)
580{
581 sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, phy->dev.bus_id);
582 sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
583}
584
585static void sas_port_delete_link(struct sas_port *port,
586 struct sas_phy *phy)
587{
588 sysfs_remove_link(&port->dev.kobj, phy->dev.bus_id);
589 sysfs_remove_link(&phy->dev.kobj, "port");
590}
591
592/** sas_port_alloc - allocate and initialize a SAS port structure
593 *
594 * @parent: parent device
595 * @port_id: port number
596 *
597 * Allocates a SAS port structure. It will be added to the device tree
598 * below the device specified by @parent which must be either a Scsi_Host
599 * or a sas_expander_device.
600 *
601 * Returns %NULL on error
602 */
603struct sas_port *sas_port_alloc(struct device *parent, int port_id)
604{
605 struct Scsi_Host *shost = dev_to_shost(parent);
606 struct sas_port *port;
607
608 port = kzalloc(sizeof(*port), GFP_KERNEL);
609 if (!port)
610 return NULL;
611
612 port->port_identifier = port_id;
613
614 device_initialize(&port->dev);
615
616 port->dev.parent = get_device(parent);
617 port->dev.release = sas_port_release;
618
619 mutex_init(&port->phy_list_mutex);
620 INIT_LIST_HEAD(&port->phy_list);
621
622 if (scsi_is_sas_expander_device(parent)) {
623 struct sas_rphy *rphy = dev_to_rphy(parent);
624 sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no,
625 rphy->scsi_target_id, port->port_identifier);
626 } else
627 sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no,
628 port->port_identifier);
629
630 transport_setup_device(&port->dev);
631
632 return port;
633}
634EXPORT_SYMBOL(sas_port_alloc);
635
636/** sas_port_alloc_num - allocate and initialize a SAS port structure
637 *
638 * @parent: parent device
639 *
640 * Allocates a SAS port structure and a number to go with it. This
641 * interface is really for adapters where the port number has no
642 * meaning, so the sas class should manage them. It will be added to
643 * the device tree below the device specified by @parent which must be
644 * either a Scsi_Host or a sas_expander_device.
645 *
646 * Returns %NULL on error
647 */
648struct sas_port *sas_port_alloc_num(struct device *parent)
649{
650 int index;
651 struct Scsi_Host *shost = dev_to_shost(parent);
652 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
653
654 /* FIXME: use idr for this eventually */
655 mutex_lock(&sas_host->lock);
656 if (scsi_is_sas_expander_device(parent)) {
657 struct sas_rphy *rphy = dev_to_rphy(parent);
658 struct sas_expander_device *exp = rphy_to_expander_device(rphy);
659
660 index = exp->next_port_id++;
661 } else
662 index = sas_host->next_port_id++;
663 mutex_unlock(&sas_host->lock);
664 return sas_port_alloc(parent, index);
665}
666EXPORT_SYMBOL(sas_port_alloc_num);
667
668/**
669 * sas_port_add - add a SAS port to the device hierarchy
670 *
671 * @port: port to be added
672 *
 673 * Publishes the port to the rest of the system.
674 */
675int sas_port_add(struct sas_port *port)
676{
677 int error;
678
679 /* No phys should be added until this is made visible */
680 BUG_ON(!list_empty(&port->phy_list));
681
682 error = device_add(&port->dev);
683
684 if (error)
685 return error;
686
687 transport_add_device(&port->dev);
688 transport_configure_device(&port->dev);
689
690 return 0;
691}
692EXPORT_SYMBOL(sas_port_add);
693
694/**
695 * sas_port_free -- free a SAS PORT
696 * @port: SAS PORT to free
697 *
698 * Frees the specified SAS PORT.
699 *
700 * Note:
701 * This function must only be called on a PORT that has not
 702 * successfully been added using sas_port_add().
703 */
704void sas_port_free(struct sas_port *port)
705{
706 transport_destroy_device(&port->dev);
707 put_device(&port->dev);
708}
709EXPORT_SYMBOL(sas_port_free);
710
711/**
712 * sas_port_delete -- remove SAS PORT
713 * @port: SAS PORT to remove
714 *
 715 * Removes the specified SAS PORT. If the SAS PORT has any
 716 * associated phys, they are unlinked from the port as well.
717 */
718void sas_port_delete(struct sas_port *port)
719{
720 struct device *dev = &port->dev;
721 struct sas_phy *phy, *tmp_phy;
722
723 if (port->rphy) {
724 sas_rphy_delete(port->rphy);
725 port->rphy = NULL;
726 }
727
728 mutex_lock(&port->phy_list_mutex);
729 list_for_each_entry_safe(phy, tmp_phy, &port->phy_list,
730 port_siblings) {
731 sas_port_delete_link(port, phy);
732 list_del_init(&phy->port_siblings);
733 }
734 mutex_unlock(&port->phy_list_mutex);
735
736 if (port->is_backlink) {
737 struct device *parent = port->dev.parent;
738
739 sysfs_remove_link(&port->dev.kobj, parent->bus_id);
740 port->is_backlink = 0;
741 }
742
743 transport_remove_device(dev);
744 device_del(dev);
745 transport_destroy_device(dev);
746 put_device(dev);
747}
748EXPORT_SYMBOL(sas_port_delete);
749
750/**
751 * scsi_is_sas_port -- check if a struct device represents a SAS port
752 * @dev: device to check
753 *
754 * Returns:
755 * %1 if the device represents a SAS Port, %0 else
756 */
757int scsi_is_sas_port(const struct device *dev)
758{
759 return dev->release == sas_port_release;
760}
761EXPORT_SYMBOL(scsi_is_sas_port);
762
763/**
764 * sas_port_add_phy - add another phy to a port to form a wide port
765 * @port: port to add the phy to
766 * @phy: phy to add
767 *
768 * When a port is initially created, it is empty (has no phys). All
 769 * ports must have at least one phy to operate, and all wide ports
 770 * must have at least two. The current code makes no distinction
771 * between ports and wide ports, but the only object that can be
772 * connected to a remote device is a port, so ports must be formed on
773 * all devices with phys if they're connected to anything.
774 */
775void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
776{
777 mutex_lock(&port->phy_list_mutex);
778 if (unlikely(!list_empty(&phy->port_siblings))) {
779 /* make sure we're already on this port */
780 struct sas_phy *tmp;
781
782 list_for_each_entry(tmp, &port->phy_list, port_siblings)
783 if (tmp == phy)
784 break;
785 /* If this trips, you added a phy that was already
786 * part of a different port */
787 if (unlikely(tmp != phy)) {
788 dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", phy->dev.bus_id);
789 BUG();
790 }
791 } else {
792 sas_port_create_link(port, phy);
793 list_add_tail(&phy->port_siblings, &port->phy_list);
794 port->num_phys++;
795 }
796 mutex_unlock(&port->phy_list_mutex);
797}
798EXPORT_SYMBOL(sas_port_add_phy);
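
(For illustration only: a wide port is formed simply by repeating sas_port_add_phy() for every phy the LLDD has matched to the same attached SAS address; the phys array and its discovery are assumed.)

static void example_form_wide_port(struct sas_port *port,
				   struct sas_phy **phys, int nr_phys)
{
	int i;

	/* after this loop port->num_phys == nr_phys; with nr_phys > 1
	 * the port is wide, with nr_phys == 1 it is a narrow port */
	for (i = 0; i < nr_phys; i++)
		sas_port_add_phy(port, phys[i]);
}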
799
800/**
801 * sas_port_delete_phy - remove a phy from a port or wide port
802 * @port: port to remove the phy from
803 * @phy: phy to remove
804 *
805 * This operation is used for tearing down ports again. It must be
806 * done to every port or wide port before calling sas_port_delete.
807 */
808void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
809{
810 mutex_lock(&port->phy_list_mutex);
811 sas_port_delete_link(port, phy);
812 list_del_init(&phy->port_siblings);
813 port->num_phys--;
814 mutex_unlock(&port->phy_list_mutex);
815}
816EXPORT_SYMBOL(sas_port_delete_phy);
817
818void sas_port_mark_backlink(struct sas_port *port)
819{
820 struct device *parent = port->dev.parent->parent->parent;
821
822 if (port->is_backlink)
823 return;
824 port->is_backlink = 1;
825 sysfs_create_link(&port->dev.kobj, &parent->kobj,
826 parent->bus_id);
827
828}
829EXPORT_SYMBOL(sas_port_mark_backlink);
830
831/*
467 * SAS remote PHY attributes. 832 * SAS remote PHY attributes.
468 */ 833 */
469 834
@@ -525,7 +890,7 @@ show_sas_rphy_enclosure_identifier(struct class_device *cdev, char *buf)
525 * Only devices behind an expander are supported, because the 890 * Only devices behind an expander are supported, because the
526 * enclosure identifier is a SMP feature. 891 * enclosure identifier is a SMP feature.
527 */ 892 */
528 if (phy->local_attached) 893 if (scsi_is_sas_phy_local(phy))
529 return -EINVAL; 894 return -EINVAL;
530 895
531 error = i->f->get_enclosure_identifier(rphy, &identifier); 896 error = i->f->get_enclosure_identifier(rphy, &identifier);
@@ -546,7 +911,7 @@ show_sas_rphy_bay_identifier(struct class_device *cdev, char *buf)
546 struct sas_internal *i = to_sas_internal(shost->transportt); 911 struct sas_internal *i = to_sas_internal(shost->transportt);
547 int val; 912 int val;
548 913
549 if (phy->local_attached) 914 if (scsi_is_sas_phy_local(phy))
550 return -EINVAL; 915 return -EINVAL;
551 916
552 val = i->f->get_bay_identifier(rphy); 917 val = i->f->get_bay_identifier(rphy);
@@ -767,7 +1132,7 @@ static void sas_rphy_initialize(struct sas_rphy *rphy)
767 * Returns: 1132 * Returns:
768 * SAS PHY allocated or %NULL if the allocation failed. 1133 * SAS PHY allocated or %NULL if the allocation failed.
769 */ 1134 */
770struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent) 1135struct sas_rphy *sas_end_device_alloc(struct sas_port *parent)
771{ 1136{
772 struct Scsi_Host *shost = dev_to_shost(&parent->dev); 1137 struct Scsi_Host *shost = dev_to_shost(&parent->dev);
773 struct sas_end_device *rdev; 1138 struct sas_end_device *rdev;
@@ -780,8 +1145,13 @@ struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent)
780 device_initialize(&rdev->rphy.dev); 1145 device_initialize(&rdev->rphy.dev);
781 rdev->rphy.dev.parent = get_device(&parent->dev); 1146 rdev->rphy.dev.parent = get_device(&parent->dev);
782 rdev->rphy.dev.release = sas_end_device_release; 1147 rdev->rphy.dev.release = sas_end_device_release;
783 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d-%d", 1148 if (scsi_is_sas_expander_device(parent->dev.parent)) {
784 shost->host_no, parent->port_identifier, parent->number); 1149 struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
1150 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d",
1151 shost->host_no, rphy->scsi_target_id, parent->port_identifier);
1152 } else
1153 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d",
1154 shost->host_no, parent->port_identifier);
785 rdev->rphy.identify.device_type = SAS_END_DEVICE; 1155 rdev->rphy.identify.device_type = SAS_END_DEVICE;
786 sas_rphy_initialize(&rdev->rphy); 1156 sas_rphy_initialize(&rdev->rphy);
787 transport_setup_device(&rdev->rphy.dev); 1157 transport_setup_device(&rdev->rphy.dev);
@@ -798,7 +1168,7 @@ EXPORT_SYMBOL(sas_end_device_alloc);
798 * Returns: 1168 * Returns:
799 * SAS PHY allocated or %NULL if the allocation failed. 1169 * SAS PHY allocated or %NULL if the allocation failed.
800 */ 1170 */
801struct sas_rphy *sas_expander_alloc(struct sas_phy *parent, 1171struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
802 enum sas_device_type type) 1172 enum sas_device_type type)
803{ 1173{
804 struct Scsi_Host *shost = dev_to_shost(&parent->dev); 1174 struct Scsi_Host *shost = dev_to_shost(&parent->dev);
@@ -837,7 +1207,7 @@ EXPORT_SYMBOL(sas_expander_alloc);
837 */ 1207 */
838int sas_rphy_add(struct sas_rphy *rphy) 1208int sas_rphy_add(struct sas_rphy *rphy)
839{ 1209{
840 struct sas_phy *parent = dev_to_phy(rphy->dev.parent); 1210 struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
841 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); 1211 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
842 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 1212 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
843 struct sas_identify *identify = &rphy->identify; 1213 struct sas_identify *identify = &rphy->identify;
@@ -865,7 +1235,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
865 1235
866 if (identify->device_type == SAS_END_DEVICE && 1236 if (identify->device_type == SAS_END_DEVICE &&
867 rphy->scsi_target_id != -1) { 1237 rphy->scsi_target_id != -1) {
868 scsi_scan_target(&rphy->dev, parent->port_identifier, 1238 scsi_scan_target(&rphy->dev, 0,
869 rphy->scsi_target_id, ~0, 0); 1239 rphy->scsi_target_id, ~0, 0);
870 } 1240 }
871 1241
@@ -910,7 +1280,7 @@ void
910sas_rphy_delete(struct sas_rphy *rphy) 1280sas_rphy_delete(struct sas_rphy *rphy)
911{ 1281{
912 struct device *dev = &rphy->dev; 1282 struct device *dev = &rphy->dev;
913 struct sas_phy *parent = dev_to_phy(dev->parent); 1283 struct sas_port *parent = dev_to_sas_port(dev->parent);
914 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); 1284 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
915 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 1285 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
916 1286
@@ -920,7 +1290,7 @@ sas_rphy_delete(struct sas_rphy *rphy)
920 break; 1290 break;
921 case SAS_EDGE_EXPANDER_DEVICE: 1291 case SAS_EDGE_EXPANDER_DEVICE:
922 case SAS_FANOUT_EXPANDER_DEVICE: 1292 case SAS_FANOUT_EXPANDER_DEVICE:
923 device_for_each_child(dev, NULL, do_sas_phy_delete); 1293 sas_remove_children(dev);
924 break; 1294 break;
925 default: 1295 default:
926 break; 1296 break;
@@ -967,15 +1337,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
967 1337
968 mutex_lock(&sas_host->lock); 1338 mutex_lock(&sas_host->lock);
969 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 1339 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
970 struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
971
972 if (rphy->identify.device_type != SAS_END_DEVICE || 1340 if (rphy->identify.device_type != SAS_END_DEVICE ||
973 rphy->scsi_target_id == -1) 1341 rphy->scsi_target_id == -1)
974 continue; 1342 continue;
975 1343
976 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && 1344 if ((channel == SCAN_WILD_CARD || channel == 0) &&
977 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { 1345 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
978 scsi_scan_target(&rphy->dev, parent->port_identifier, 1346 scsi_scan_target(&rphy->dev, 0,
979 rphy->scsi_target_id, lun, 1); 1347 rphy->scsi_target_id, lun, 1);
980 } 1348 }
981 } 1349 }
@@ -989,13 +1357,23 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
989 * Setup / Teardown code 1357 * Setup / Teardown code
990 */ 1358 */
991 1359
992#define SETUP_TEMPLATE(attrb, field, perm, test) \ 1360#define SETUP_TEMPLATE(attrb, field, perm, test) \
993 i->private_##attrb[count] = class_device_attr_##field; \ 1361 i->private_##attrb[count] = class_device_attr_##field; \
994 i->private_##attrb[count].attr.mode = perm; \ 1362 i->private_##attrb[count].attr.mode = perm; \
995 i->attrb[count] = &i->private_##attrb[count]; \ 1363 i->attrb[count] = &i->private_##attrb[count]; \
996 if (test) \ 1364 if (test) \
997 count++ 1365 count++
998 1366
1367#define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \
1368 i->private_##attrb[count] = class_device_attr_##field; \
1369 i->private_##attrb[count].attr.mode = perm; \
1370 if (ro_test) { \
1371 i->private_##attrb[count].attr.mode = ro_perm; \
1372 i->private_##attrb[count].store = NULL; \
1373 } \
1374 i->attrb[count] = &i->private_##attrb[count]; \
1375 if (test) \
1376 count++
999 1377
1000#define SETUP_RPORT_ATTRIBUTE(field) \ 1378#define SETUP_RPORT_ATTRIBUTE(field) \
1001 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) 1379 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1)
@@ -1003,16 +1381,23 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1003#define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \ 1381#define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \
1004 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func) 1382 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func)
1005 1383
1006#define SETUP_PORT_ATTRIBUTE(field) \ 1384#define SETUP_PHY_ATTRIBUTE(field) \
1007 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) 1385 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1)
1008 1386
1009#define SETUP_OPTIONAL_PORT_ATTRIBUTE(field, func) \ 1387#define SETUP_PHY_ATTRIBUTE_RW(field) \
1388 SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
1389 !i->f->set_phy_speed, S_IRUGO)
1390
1391#define SETUP_PORT_ATTRIBUTE(field) \
1392 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
1393
1394#define SETUP_OPTIONAL_PHY_ATTRIBUTE(field, func) \
1010 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) 1395 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func)
1011 1396
1012#define SETUP_PORT_ATTRIBUTE_WRONLY(field) \ 1397#define SETUP_PHY_ATTRIBUTE_WRONLY(field) \
1013 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1) 1398 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1)
1014 1399
1015#define SETUP_OPTIONAL_PORT_ATTRIBUTE_WRONLY(field, func) \ 1400#define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \
1016 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func) 1401 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func)
1017 1402
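
(To make the conditional read-only downgrade concrete, this is approximately what SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate) above expands to; hand-expanded for illustration, the real text comes from SETUP_TEMPLATE_RW.)

	i->private_phy_attrs[count] = class_device_attr_minimum_linkrate;
	i->private_phy_attrs[count].attr.mode = S_IRUGO | S_IWUSR;
	if (!i->f->set_phy_speed) {
		/* LLDD cannot change link rates: expose it read-only */
		i->private_phy_attrs[count].attr.mode = S_IRUGO;
		i->private_phy_attrs[count].store = NULL;
	}
	i->phy_attrs[count] = &i->private_phy_attrs[count];
	if (1)
		count++;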
1018#define SETUP_END_DEV_ATTRIBUTE(field) \ 1403#define SETUP_END_DEV_ATTRIBUTE(field) \
@@ -1048,6 +1433,11 @@ sas_attach_transport(struct sas_function_template *ft)
1048 i->phy_attr_cont.ac.match = sas_phy_match; 1433 i->phy_attr_cont.ac.match = sas_phy_match;
1049 transport_container_register(&i->phy_attr_cont); 1434 transport_container_register(&i->phy_attr_cont);
1050 1435
1436 i->port_attr_cont.ac.class = &sas_port_class.class;
1437 i->port_attr_cont.ac.attrs = &i->port_attrs[0];
1438 i->port_attr_cont.ac.match = sas_port_match;
1439 transport_container_register(&i->port_attr_cont);
1440
1051 i->rphy_attr_cont.ac.class = &sas_rphy_class.class; 1441 i->rphy_attr_cont.ac.class = &sas_rphy_class.class;
1052 i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0]; 1442 i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0];
1053 i->rphy_attr_cont.ac.match = sas_rphy_match; 1443 i->rphy_attr_cont.ac.match = sas_rphy_match;
@@ -1066,30 +1456,35 @@ sas_attach_transport(struct sas_function_template *ft)
1066 i->f = ft; 1456 i->f = ft;
1067 1457
1068 count = 0; 1458 count = 0;
1459 SETUP_PORT_ATTRIBUTE(num_phys);
1069 i->host_attrs[count] = NULL; 1460 i->host_attrs[count] = NULL;
1070 1461
1071 count = 0; 1462 count = 0;
1072 SETUP_PORT_ATTRIBUTE(initiator_port_protocols); 1463 SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
1073 SETUP_PORT_ATTRIBUTE(target_port_protocols); 1464 SETUP_PHY_ATTRIBUTE(target_port_protocols);
1074 SETUP_PORT_ATTRIBUTE(device_type); 1465 SETUP_PHY_ATTRIBUTE(device_type);
1075 SETUP_PORT_ATTRIBUTE(sas_address); 1466 SETUP_PHY_ATTRIBUTE(sas_address);
1076 SETUP_PORT_ATTRIBUTE(phy_identifier); 1467 SETUP_PHY_ATTRIBUTE(phy_identifier);
1077 SETUP_PORT_ATTRIBUTE(port_identifier); 1468 //SETUP_PHY_ATTRIBUTE(port_identifier);
1078 SETUP_PORT_ATTRIBUTE(negotiated_linkrate); 1469 SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
1079 SETUP_PORT_ATTRIBUTE(minimum_linkrate_hw); 1470 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
1080 SETUP_PORT_ATTRIBUTE(minimum_linkrate); 1471 SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
1081 SETUP_PORT_ATTRIBUTE(maximum_linkrate_hw); 1472 SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw);
1082 SETUP_PORT_ATTRIBUTE(maximum_linkrate); 1473 SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate);
1083 1474
1084 SETUP_PORT_ATTRIBUTE(invalid_dword_count); 1475 SETUP_PHY_ATTRIBUTE(invalid_dword_count);
1085 SETUP_PORT_ATTRIBUTE(running_disparity_error_count); 1476 SETUP_PHY_ATTRIBUTE(running_disparity_error_count);
1086 SETUP_PORT_ATTRIBUTE(loss_of_dword_sync_count); 1477 SETUP_PHY_ATTRIBUTE(loss_of_dword_sync_count);
1087 SETUP_PORT_ATTRIBUTE(phy_reset_problem_count); 1478 SETUP_PHY_ATTRIBUTE(phy_reset_problem_count);
1088 SETUP_OPTIONAL_PORT_ATTRIBUTE_WRONLY(link_reset, phy_reset); 1479 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset);
1089 SETUP_OPTIONAL_PORT_ATTRIBUTE_WRONLY(hard_reset, phy_reset); 1480 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset);
1090 i->phy_attrs[count] = NULL; 1481 i->phy_attrs[count] = NULL;
1091 1482
1092 count = 0; 1483 count = 0;
1484 SETUP_PORT_ATTRIBUTE(num_phys);
1485 i->port_attrs[count] = NULL;
1486
1487 count = 0;
1093 SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols); 1488 SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols);
1094 SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols); 1489 SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols);
1095 SETUP_RPORT_ATTRIBUTE(rphy_device_type); 1490 SETUP_RPORT_ATTRIBUTE(rphy_device_type);
@@ -1131,6 +1526,7 @@ void sas_release_transport(struct scsi_transport_template *t)
1131 1526
1132 transport_container_unregister(&i->t.host_attrs); 1527 transport_container_unregister(&i->t.host_attrs);
1133 transport_container_unregister(&i->phy_attr_cont); 1528 transport_container_unregister(&i->phy_attr_cont);
1529 transport_container_unregister(&i->port_attr_cont);
1134 transport_container_unregister(&i->rphy_attr_cont); 1530 transport_container_unregister(&i->rphy_attr_cont);
1135 transport_container_unregister(&i->end_dev_attr_cont); 1531 transport_container_unregister(&i->end_dev_attr_cont);
1136 transport_container_unregister(&i->expander_attr_cont); 1532 transport_container_unregister(&i->expander_attr_cont);
@@ -1149,9 +1545,12 @@ static __init int sas_transport_init(void)
1149 error = transport_class_register(&sas_phy_class); 1545 error = transport_class_register(&sas_phy_class);
1150 if (error) 1546 if (error)
1151 goto out_unregister_transport; 1547 goto out_unregister_transport;
1152 error = transport_class_register(&sas_rphy_class); 1548 error = transport_class_register(&sas_port_class);
1153 if (error) 1549 if (error)
1154 goto out_unregister_phy; 1550 goto out_unregister_phy;
1551 error = transport_class_register(&sas_rphy_class);
1552 if (error)
1553 goto out_unregister_port;
1155 error = transport_class_register(&sas_end_dev_class); 1554 error = transport_class_register(&sas_end_dev_class);
1156 if (error) 1555 if (error)
1157 goto out_unregister_rphy; 1556 goto out_unregister_rphy;
@@ -1165,6 +1564,8 @@ static __init int sas_transport_init(void)
1165 transport_class_unregister(&sas_end_dev_class); 1564 transport_class_unregister(&sas_end_dev_class);
1166 out_unregister_rphy: 1565 out_unregister_rphy:
1167 transport_class_unregister(&sas_rphy_class); 1566 transport_class_unregister(&sas_rphy_class);
1567 out_unregister_port:
1568 transport_class_unregister(&sas_port_class);
1168 out_unregister_phy: 1569 out_unregister_phy:
1169 transport_class_unregister(&sas_phy_class); 1570 transport_class_unregister(&sas_phy_class);
1170 out_unregister_transport: 1571 out_unregister_transport:
@@ -1178,6 +1579,7 @@ static void __exit sas_transport_exit(void)
1178{ 1579{
1179 transport_class_unregister(&sas_host_class); 1580 transport_class_unregister(&sas_host_class);
1180 transport_class_unregister(&sas_phy_class); 1581 transport_class_unregister(&sas_phy_class);
1582 transport_class_unregister(&sas_port_class);
1181 transport_class_unregister(&sas_rphy_class); 1583 transport_class_unregister(&sas_rphy_class);
1182 transport_class_unregister(&sas_end_dev_class); 1584 transport_class_unregister(&sas_end_dev_class);
1183 transport_class_unregister(&sas_expander_class); 1585 transport_class_unregister(&sas_expander_class);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index ace49d5bd9c4..9f070f0d0f2b 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -18,7 +18,6 @@
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21#include <linux/config.h>
22#include <linux/ctype.h> 21#include <linux/ctype.h>
23#include <linux/init.h> 22#include <linux/init.h>
24#include <linux/module.h> 23#include <linux/module.h>
@@ -48,6 +47,7 @@
48 47
49/* Private data accessors (keep these out of the header file) */ 48/* Private data accessors (keep these out of the header file) */
50#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending) 49#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending)
50#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
51#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) 51#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
52 52
53struct spi_internal { 53struct spi_internal {
@@ -241,6 +241,7 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
241 spi_pcomp_en(starget) = 0; 241 spi_pcomp_en(starget) = 0;
242 spi_hold_mcs(starget) = 0; 242 spi_hold_mcs(starget) = 0;
243 spi_dv_pending(starget) = 0; 243 spi_dv_pending(starget) = 0;
244 spi_dv_in_progress(starget) = 0;
244 spi_initial_dv(starget) = 0; 245 spi_initial_dv(starget) = 0;
245 mutex_init(&spi_dv_mutex(starget)); 246 mutex_init(&spi_dv_mutex(starget));
246 247
@@ -831,28 +832,37 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
831 DV_SET(period, spi_min_period(starget)); 832 DV_SET(period, spi_min_period(starget));
832 /* try QAS requests; this should be harmless to set if the 833 /* try QAS requests; this should be harmless to set if the
833 * target supports it */ 834 * target supports it */
834 if (scsi_device_qas(sdev)) 835 if (scsi_device_qas(sdev)) {
835 DV_SET(qas, 1); 836 DV_SET(qas, 1);
836 /* Also try IU transfers */ 837 } else {
837 if (scsi_device_ius(sdev)) 838 DV_SET(qas, 0);
839 }
840
841 if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
 842 /* This is u320 (or u640). Set IU transfers */
838 DV_SET(iu, 1); 843 DV_SET(iu, 1);
839 if (spi_min_period(starget) < 9) { 844 /* Then set the optional parameters */
840 /* This u320 (or u640). Ignore the coupled parameters
841 * like DT and IU, but set the optional ones */
842 DV_SET(rd_strm, 1); 845 DV_SET(rd_strm, 1);
843 DV_SET(wr_flow, 1); 846 DV_SET(wr_flow, 1);
844 DV_SET(rti, 1); 847 DV_SET(rti, 1);
845 if (spi_min_period(starget) == 8) 848 if (spi_min_period(starget) == 8)
846 DV_SET(pcomp_en, 1); 849 DV_SET(pcomp_en, 1);
850 } else {
851 DV_SET(iu, 0);
847 } 852 }
853
848 /* now that we've done all this, actually check the bus 854 /* now that we've done all this, actually check the bus
849 * signal type (if known). Some devices are stupid on 855 * signal type (if known). Some devices are stupid on
850 * a SE bus and still claim they can try LVD only settings */ 856 * a SE bus and still claim they can try LVD only settings */
851 if (i->f->get_signalling) 857 if (i->f->get_signalling)
852 i->f->get_signalling(shost); 858 i->f->get_signalling(shost);
853 if (spi_signalling(shost) == SPI_SIGNAL_SE || 859 if (spi_signalling(shost) == SPI_SIGNAL_SE ||
854 spi_signalling(shost) == SPI_SIGNAL_HVD) 860 spi_signalling(shost) == SPI_SIGNAL_HVD ||
861 !scsi_device_dt(sdev)) {
855 DV_SET(dt, 0); 862 DV_SET(dt, 0);
863 } else {
864 DV_SET(dt, 1);
865 }
856 /* Do the read only INQUIRY tests */ 866 /* Do the read only INQUIRY tests */
857 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 867 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
858 spi_dv_device_compare_inquiry); 868 spi_dv_device_compare_inquiry);
@@ -908,6 +918,10 @@ spi_dv_device(struct scsi_device *sdev)
908 if (unlikely(scsi_device_get(sdev))) 918 if (unlikely(scsi_device_get(sdev)))
909 return; 919 return;
910 920
921 if (unlikely(spi_dv_in_progress(starget)))
922 return;
923 spi_dv_in_progress(starget) = 1;
924
911 buffer = kzalloc(len, GFP_KERNEL); 925 buffer = kzalloc(len, GFP_KERNEL);
912 926
913 if (unlikely(!buffer)) 927 if (unlikely(!buffer))
@@ -939,6 +953,7 @@ spi_dv_device(struct scsi_device *sdev)
939 out_free: 953 out_free:
940 kfree(buffer); 954 kfree(buffer);
941 out_put: 955 out_put:
956 spi_dv_in_progress(starget) = 0;
942 scsi_device_put(sdev); 957 scsi_device_put(sdev);
943} 958}
944EXPORT_SYMBOL(spi_dv_device); 959EXPORT_SYMBOL(spi_dv_device);
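
(Reduced to its shape, the new dv_in_progress flag is a plain per-target re-entrancy guard around the validation run; a sketch with the actual work elided:)

	if (unlikely(spi_dv_in_progress(starget)))
		return;			/* a DV pass is already running */
	spi_dv_in_progress(starget) = 1;
	/* ... allocate buffer, quiesce, spi_dv_device_internal(), resume ... */
	spi_dv_in_progress(starget) = 0;	/* cleared on the common exit path */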
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index b78354fc4b17..cd68a66c7bb3 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -57,6 +57,7 @@ EXPORT_SYMBOL(scsi_bios_ptable);
57int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) 57int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
58{ 58{
59 unsigned char *p; 59 unsigned char *p;
60 u64 capacity64 = capacity; /* Suppress gcc warning */
60 int ret; 61 int ret;
61 62
62 p = scsi_bios_ptable(bdev); 63 p = scsi_bios_ptable(bdev);
@@ -68,7 +69,7 @@ int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
68 (unsigned int *)ip + 0, (unsigned int *)ip + 1); 69 (unsigned int *)ip + 0, (unsigned int *)ip + 1);
69 kfree(p); 70 kfree(p);
70 71
71 if (ret == -1) { 72 if (ret == -1 && capacity64 < (1ULL << 32)) {
72 /* pick some standard mapping with at most 1024 cylinders, 73 /* pick some standard mapping with at most 1024 cylinders,
73 and at most 62 sectors per track - this works up to 74 and at most 62 sectors per track - this works up to
74 7905 MB */ 75 7905 MB */
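
(For scale, assuming the usual 255-head ceiling of the standard mapping: 1024 cylinders * 255 heads * 62 sectors/track = 16,186,880 512-byte sectors, which is the roughly 7905 MB the comment cites. The new capacity64 < (1ULL << 32) test simply refuses to apply that heuristic to disks whose sector count no longer fits the arithmetic.)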
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 354199011246..638cff41d436 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -32,7 +32,6 @@
32 * than the level indicated above to trigger output. 32 * than the level indicated above to trigger output.
33 */ 33 */
34 34
35#include <linux/config.h>
36#include <linux/module.h> 35#include <linux/module.h>
37#include <linux/fs.h> 36#include <linux/fs.h>
38#include <linux/kernel.h> 37#include <linux/kernel.h>
@@ -208,6 +207,23 @@ static ssize_t sd_store_cache_type(struct class_device *cdev, const char *buf,
208 return count; 207 return count;
209} 208}
210 209
210static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf,
211 size_t count)
212{
213 struct scsi_disk *sdkp = to_scsi_disk(cdev);
214 struct scsi_device *sdp = sdkp->device;
215
216 if (!capable(CAP_SYS_ADMIN))
217 return -EACCES;
218
219 if (sdp->type != TYPE_DISK)
220 return -EINVAL;
221
222 sdp->allow_restart = simple_strtoul(buf, NULL, 10);
223
224 return count;
225}
226
211static ssize_t sd_show_cache_type(struct class_device *cdev, char *buf) 227static ssize_t sd_show_cache_type(struct class_device *cdev, char *buf)
212{ 228{
213 struct scsi_disk *sdkp = to_scsi_disk(cdev); 229 struct scsi_disk *sdkp = to_scsi_disk(cdev);
@@ -223,10 +239,19 @@ static ssize_t sd_show_fua(struct class_device *cdev, char *buf)
223 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); 239 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
224} 240}
225 241
242static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf)
243{
244 struct scsi_disk *sdkp = to_scsi_disk(cdev);
245
246 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
247}
248
226static struct class_device_attribute sd_disk_attrs[] = { 249static struct class_device_attribute sd_disk_attrs[] = {
227 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 250 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
228 sd_store_cache_type), 251 sd_store_cache_type),
229 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), 252 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
253 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
254 sd_store_allow_restart),
230 __ATTR_NULL, 255 __ATTR_NULL,
231}; 256};
232 257
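
(With the attribute registered above, the flag is expected to surface per disk under the scsi_disk class; path assumed from the class_device registration. Writing a nonzero value to /sys/class/scsi_disk/<h:c:t:l>/allow_restart from a CAP_SYS_ADMIN context enables the behaviour, and reading it back reports the current setting.)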
@@ -477,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
477 SCpnt->cmnd[4] = (unsigned char) this_count; 502 SCpnt->cmnd[4] = (unsigned char) this_count;
478 SCpnt->cmnd[5] = 0; 503 SCpnt->cmnd[5] = 0;
479 } 504 }
480 SCpnt->request_bufflen = SCpnt->bufflen = 505 SCpnt->request_bufflen = this_count * sdp->sector_size;
481 this_count * sdp->sector_size;
482 506
483 /* 507 /*
484 * We shouldn't disconnect in the middle of a sector, so with a dumb 508 * We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -891,11 +915,10 @@ static struct block_device_operations sd_fops = {
891static void sd_rw_intr(struct scsi_cmnd * SCpnt) 915static void sd_rw_intr(struct scsi_cmnd * SCpnt)
892{ 916{
893 int result = SCpnt->result; 917 int result = SCpnt->result;
894 int this_count = SCpnt->request_bufflen; 918 unsigned int xfer_size = SCpnt->request_bufflen;
895 int good_bytes = (result == 0 ? this_count : 0); 919 unsigned int good_bytes = result ? 0 : xfer_size;
896 sector_t block_sectors = 1; 920 u64 start_lba = SCpnt->request->sector;
897 u64 first_err_block; 921 u64 bad_lba;
898 sector_t error_sector;
899 struct scsi_sense_hdr sshdr; 922 struct scsi_sense_hdr sshdr;
900 int sense_valid = 0; 923 int sense_valid = 0;
901 int sense_deferred = 0; 924 int sense_deferred = 0;
@@ -906,7 +929,6 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
906 if (sense_valid) 929 if (sense_valid)
907 sense_deferred = scsi_sense_is_deferred(&sshdr); 930 sense_deferred = scsi_sense_is_deferred(&sshdr);
908 } 931 }
909
910#ifdef CONFIG_SCSI_LOGGING 932#ifdef CONFIG_SCSI_LOGGING
911 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n", 933 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n",
912 SCpnt->request->rq_disk->disk_name, result)); 934 SCpnt->request->rq_disk->disk_name, result));
@@ -916,89 +938,72 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
916 sshdr.sense_key, sshdr.asc, sshdr.ascq)); 938 sshdr.sense_key, sshdr.asc, sshdr.ascq));
917 } 939 }
918#endif 940#endif
919 /* 941 if (driver_byte(result) != DRIVER_SENSE &&
920 Handle MEDIUM ERRORs that indicate partial success. Since this is a 942 (!sense_valid || sense_deferred))
921 relatively rare error condition, no care is taken to avoid 943 goto out;
922 unnecessary additional work such as memcpy's that could be avoided.
923 */
924 if (driver_byte(result) != 0 &&
925 sense_valid && !sense_deferred) {
926 switch (sshdr.sense_key) {
927 case MEDIUM_ERROR:
928 if (!blk_fs_request(SCpnt->request))
929 break;
930 info_valid = scsi_get_sense_info_fld(
931 SCpnt->sense_buffer, SCSI_SENSE_BUFFERSIZE,
932 &first_err_block);
933 /*
934 * May want to warn and skip if following cast results
935 * in actual truncation (if sector_t < 64 bits)
936 */
937 error_sector = (sector_t)first_err_block;
938 if (SCpnt->request->bio != NULL)
939 block_sectors = bio_sectors(SCpnt->request->bio);
940 switch (SCpnt->device->sector_size) {
941 case 1024:
942 error_sector <<= 1;
943 if (block_sectors < 2)
944 block_sectors = 2;
945 break;
946 case 2048:
947 error_sector <<= 2;
948 if (block_sectors < 4)
949 block_sectors = 4;
950 break;
951 case 4096:
952 error_sector <<=3;
953 if (block_sectors < 8)
954 block_sectors = 8;
955 break;
956 case 256:
957 error_sector >>= 1;
958 break;
959 default:
960 break;
961 }
962 944
963 error_sector &= ~(block_sectors - 1); 945 switch (sshdr.sense_key) {
964 good_bytes = (error_sector - SCpnt->request->sector) << 9; 946 case HARDWARE_ERROR:
965 if (good_bytes < 0 || good_bytes >= this_count) 947 case MEDIUM_ERROR:
966 good_bytes = 0; 948 if (!blk_fs_request(SCpnt->request))
949 goto out;
950 info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
951 SCSI_SENSE_BUFFERSIZE,
952 &bad_lba);
953 if (!info_valid)
954 goto out;
955 if (xfer_size <= SCpnt->device->sector_size)
956 goto out;
957 switch (SCpnt->device->sector_size) {
958 case 256:
959 start_lba <<= 1;
967 break; 960 break;
968 961 case 512:
969 case RECOVERED_ERROR: /* an error occurred, but it recovered */
970 case NO_SENSE: /* LLDD got sense data */
971 /*
972 * Inform the user, but make sure that it's not treated
973 * as a hard error.
974 */
975 scsi_print_sense("sd", SCpnt);
976 SCpnt->result = 0;
977 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
978 good_bytes = this_count;
979 break; 962 break;
980 963 case 1024:
981 case ILLEGAL_REQUEST: 964 start_lba >>= 1;
982 if (SCpnt->device->use_10_for_rw && 965 break;
983 (SCpnt->cmnd[0] == READ_10 || 966 case 2048:
984 SCpnt->cmnd[0] == WRITE_10)) 967 start_lba >>= 2;
985 SCpnt->device->use_10_for_rw = 0; 968 break;
986 if (SCpnt->device->use_10_for_ms && 969 case 4096:
987 (SCpnt->cmnd[0] == MODE_SENSE_10 || 970 start_lba >>= 3;
988 SCpnt->cmnd[0] == MODE_SELECT_10))
989 SCpnt->device->use_10_for_ms = 0;
990 break; 971 break;
991
992 default: 972 default:
973 /* Print something here with limiting frequency. */
974 goto out;
993 break; 975 break;
994 } 976 }
977 /* This computation should always be done in terms of
978 * the resolution of the device's medium.
979 */
980 good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
981 break;
982 case RECOVERED_ERROR:
983 case NO_SENSE:
984 /* Inform the user, but make sure that it's not treated
985 * as a hard error.
986 */
987 scsi_print_sense("sd", SCpnt);
988 SCpnt->result = 0;
989 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
990 good_bytes = xfer_size;
991 break;
992 case ILLEGAL_REQUEST:
993 if (SCpnt->device->use_10_for_rw &&
994 (SCpnt->cmnd[0] == READ_10 ||
995 SCpnt->cmnd[0] == WRITE_10))
996 SCpnt->device->use_10_for_rw = 0;
997 if (SCpnt->device->use_10_for_ms &&
998 (SCpnt->cmnd[0] == MODE_SENSE_10 ||
999 SCpnt->cmnd[0] == MODE_SELECT_10))
1000 SCpnt->device->use_10_for_ms = 0;
1001 break;
1002 default:
1003 break;
995 } 1004 }
996 /* 1005 out:
997 * This calls the generic completion function, now that we know 1006 scsi_io_completion(SCpnt, good_bytes);
998 * how many actual sectors finished, and how many sectors we need
999 * to say have failed.
1000 */
1001 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9);
1002} 1007}
1003 1008
1004static int media_not_present(struct scsi_disk *sdkp, 1009static int media_not_present(struct scsi_disk *sdkp,
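
(A worked example of the good_bytes computation above: on a 4096-byte-sector disk, a request beginning at 512-byte sector 8000 gives start_lba >>= 3, i.e. device block 1000. If the sense data reports bad_lba = 1003, then good_bytes = (1003 - 1000) * 4096 = 12288, so the first three device blocks are completed and the request fails from the defective block onward.)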
@@ -1210,7 +1215,7 @@ repeat:
1210 /* Either no media are present but the drive didn't tell us, 1215 /* Either no media are present but the drive didn't tell us,
1211 or they are present but the read capacity command fails */ 1216 or they are present but the read capacity command fails */
1212 /* sdkp->media_present = 0; -- not always correct */ 1217 /* sdkp->media_present = 0; -- not always correct */
1213 sdkp->capacity = 0x200000; /* 1 GB - random */ 1218 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1214 1219
1215 return; 1220 return;
1216 } else if (the_result && longrc) { 1221 } else if (the_result && longrc) {
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 7fa4da4ea64f..2679ea8bff1a 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -497,7 +497,7 @@ int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
497 return 0; 497 return 0;
498 498
499 hostno = instance->host_no; 499 hostno = instance->host_no;
500 if (request_irq (irq, do_seagate_reconnect_intr, SA_INTERRUPT, (controller_type == SEAGATE) ? "seagate" : "tmc-8xx", instance)) { 500 if (request_irq (irq, do_seagate_reconnect_intr, IRQF_DISABLED, (controller_type == SEAGATE) ? "seagate" : "tmc-8xx", instance)) {
501 printk(KERN_ERR "scsi%d : unable to allocate IRQ%d\n", hostno, irq); 501 printk(KERN_ERR "scsi%d : unable to allocate IRQ%d\n", hostno, irq);
502 return 0; 502 return 0;
503 } 503 }
@@ -1002,7 +1002,7 @@ connect_loop:
1002 } 1002 }
1003#endif 1003#endif
1004 1004
1005 buffer = (struct scatterlist *) SCint->buffer; 1005 buffer = (struct scatterlist *) SCint->request_buffer;
1006 len = buffer->length; 1006 len = buffer->length;
1007 data = page_address(buffer->page) + buffer->offset; 1007 data = page_address(buffer->page) + buffer->offset;
1008 } else { 1008 } else {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 98b9312ba8da..34f9343ed0af 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
18 * 18 *
19 */ 19 */
20 20
21static int sg_version_num = 30533; /* 2 digits for each component */ 21static int sg_version_num = 30534; /* 2 digits for each component */
22#define SG_VERSION_STR "3.5.33" 22#define SG_VERSION_STR "3.5.34"
23 23
24/* 24/*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: 25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -28,7 +28,6 @@ static int sg_version_num = 30533; /* 2 digits for each component */
28 * (otherwise the macros compile to empty statements). 28 * (otherwise the macros compile to empty statements).
29 * 29 *
30 */ 30 */
31#include <linux/config.h>
32#include <linux/module.h> 31#include <linux/module.h>
33 32
34#include <linux/fs.h> 33#include <linux/fs.h>
@@ -61,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
61 60
62#ifdef CONFIG_SCSI_PROC_FS 61#ifdef CONFIG_SCSI_PROC_FS
63#include <linux/proc_fs.h> 62#include <linux/proc_fs.h>
64static char *sg_version_date = "20050908"; 63static char *sg_version_date = "20060818";
65 64
66static int sg_proc_init(void); 65static int sg_proc_init(void);
67static void sg_proc_cleanup(void); 66static void sg_proc_cleanup(void);
@@ -1165,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1165 len = vma->vm_end - sa; 1164 len = vma->vm_end - sa;
1166 len = (len < sg->length) ? len : sg->length; 1165 len = (len < sg->length) ? len : sg->length;
1167 if (offset < len) { 1166 if (offset < len) {
1168 page = sg->page; 1167 page = virt_to_page(page_address(sg->page) + offset);
1169 get_page(page); /* increment page count */ 1168 get_page(page); /* increment page count */
1170 break; 1169 break;
1171 } 1170 }
@@ -1402,6 +1401,7 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1402 Sg_device *sdp = NULL; 1401 Sg_device *sdp = NULL;
1403 struct cdev * cdev = NULL; 1402 struct cdev * cdev = NULL;
1404 int error, k; 1403 int error, k;
1404 unsigned long iflags;
1405 1405
1406 disk = alloc_disk(1); 1406 disk = alloc_disk(1);
1407 if (!disk) { 1407 if (!disk) {
@@ -1429,7 +1429,7 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1429 1429
1430 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1); 1430 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1431 if (error) 1431 if (error)
1432 goto out; 1432 goto cdev_add_err;
1433 1433
1434 sdp->cdev = cdev; 1434 sdp->cdev = cdev;
1435 if (sg_sysfs_valid) { 1435 if (sg_sysfs_valid) {
@@ -1456,6 +1456,13 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1456 1456
1457 return 0; 1457 return 0;
1458 1458
1459cdev_add_err:
1460 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1461 kfree(sg_dev_arr[k]);
1462 sg_dev_arr[k] = NULL;
1463 sg_nr_dev--;
1464 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1465
1459out: 1466out:
1460 put_disk(disk); 1467 put_disk(disk);
1461 if (cdev) 1468 if (cdev)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 7cd366fcc571..4f1db6f2aae8 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -97,7 +97,7 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id, struct pt_regs *regs)
97} 97}
98 98
99static inline 99static inline
100void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp) 100void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
101{ 101{
102 unsigned long len = cmd->SCp.this_residual; 102 unsigned long len = cmd->SCp.this_residual;
103 void *addr = cmd->SCp.ptr; 103 void *addr = cmd->SCp.ptr;
@@ -129,7 +129,7 @@ void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp)
129 hcp->desc.cntinfo = HPCDMA_EOX; 129 hcp->desc.cntinfo = HPCDMA_EOX;
130} 130}
131 131
132static int dma_setup(Scsi_Cmnd *cmd, int datainp) 132static int dma_setup(struct scsi_cmnd *cmd, int datainp)
133{ 133{
134 struct ip22_hostdata *hdata = HDATA(cmd->device->host); 134 struct ip22_hostdata *hdata = HDATA(cmd->device->host);
135 struct hpc3_scsiregs *hregs = 135 struct hpc3_scsiregs *hregs =
@@ -163,7 +163,7 @@ static int dma_setup(Scsi_Cmnd *cmd, int datainp)
163 return 0; 163 return 0;
164} 164}
165 165
166static void dma_stop(struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 166static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
167 int status) 167 int status)
168{ 168{
169 struct ip22_hostdata *hdata = HDATA(instance); 169 struct ip22_hostdata *hdata = HDATA(instance);
@@ -305,7 +305,7 @@ static int sgiwd93_release(struct Scsi_Host *instance)
305 return 1; 305 return 1;
306} 306}
307 307
308static int sgiwd93_bus_reset(Scsi_Cmnd *cmd) 308static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
309{ 309{
310 /* FIXME perform bus-specific reset */ 310 /* FIXME perform bus-specific reset */
311 311
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 255886a9ac55..551baccec523 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -26,7 +26,6 @@
26 * 26 *
27 */ 27 */
28 28
29#include <linux/config.h>
30#include <linux/module.h> 29#include <linux/module.h>
31 30
32#include <linux/blkdev.h> 31#include <linux/blkdev.h>
@@ -134,7 +133,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
134 host->this_id = scsi_id; 133 host->this_id = scsi_id;
135 host->base = base_addr; 134 host->base = base_addr;
136 host->irq = irq; 135 host->irq = irq;
137 if (request_irq(irq, NCR_700_intr, SA_SHIRQ, "sim710", host)) { 136 if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "sim710", host)) {
138 printk(KERN_ERR "sim710: request_irq failed\n"); 137 printk(KERN_ERR "sim710: request_irq failed\n");
139 goto out_put_host; 138 goto out_put_host;
140 } 139 }
@@ -283,6 +282,7 @@ static struct eisa_device_id sim710_eisa_ids[] = {
283 { "HWP0C80" }, 282 { "HWP0C80" },
284 { "" } 283 { "" }
285}; 284};
285MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
286 286
287static __init int 287static __init int
288sim710_eisa_probe(struct device *dev) 288sim710_eisa_probe(struct device *dev)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ebf6579ed698..fae6e95a6298 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,7 @@ static void rw_intr(struct scsi_cmnd * SCpnt)
292 * how many actual sectors finished, and how many sectors we need 292 * how many actual sectors finished, and how many sectors we need
293 * to say have failed. 293 * to say have failed.
294 */ 294 */
295 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9); 295 scsi_io_completion(SCpnt, good_bytes);
296} 296}
297 297
298static int sr_init_command(struct scsi_cmnd * SCpnt) 298static int sr_init_command(struct scsi_cmnd * SCpnt)
@@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
360 "mismatch count %d, bytes %d\n", 360 "mismatch count %d, bytes %d\n",
361 size, SCpnt->request_bufflen); 361 size, SCpnt->request_bufflen);
362 if (SCpnt->request_bufflen > size) 362 if (SCpnt->request_bufflen > size)
363 SCpnt->request_bufflen = SCpnt->bufflen = size; 363 SCpnt->request_bufflen = size;
364 } 364 }
365 } 365 }
366 366
@@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
387 387
388 if (this_count > 0xffff) { 388 if (this_count > 0xffff) {
389 this_count = 0xffff; 389 this_count = 0xffff;
390 SCpnt->request_bufflen = SCpnt->bufflen = 390 SCpnt->request_bufflen = this_count * s_size;
391 this_count * s_size;
392 } 391 }
393 392
394 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 393 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 9dde8df2f5c9..a3e9d0f2eb5b 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -34,7 +34,6 @@
34 * HP 6020 writers now supported. 34 * HP 6020 writers now supported.
35 */ 35 */
36 36
37#include <linux/config.h>
38#include <linux/cdrom.h> 37#include <linux/cdrom.h>
39#include <linux/errno.h> 38#include <linux/errno.h>
40#include <linux/string.h> 39#include <linux/string.h>
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1272dd249af3..7f669b600677 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
370 if (cmdstatp->have_sense) 370 if (cmdstatp->have_sense)
371 __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 371 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
372 } ) /* end DEB */ 372 } ) /* end DEB */
373 if (!debugging) { /* Abnormal conditions for tape */ 373 if (!debugging) { /* Abnormal conditions for tape */
374 if (!cmdstatp->have_sense) 374 if (!cmdstatp->have_sense)
@@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
384 scode != VOLUME_OVERFLOW && 384 scode != VOLUME_OVERFLOW &&
385 SRpnt->cmd[0] != MODE_SENSE && 385 SRpnt->cmd[0] != MODE_SENSE &&
386 SRpnt->cmd[0] != TEST_UNIT_READY) { 386 SRpnt->cmd[0] != TEST_UNIT_READY) {
387 printk(KERN_WARNING "%s: Error with sense data: ", name); 387
388 __scsi_print_sense("st", SRpnt->sense, 388 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
389 SCSI_SENSE_BUFFERSIZE);
390 } 389 }
391 } 390 }
392 391
@@ -2818,7 +2817,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2818 (cmdstatp->sense_hdr.sense_key == NO_SENSE || 2817 (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
2819 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && 2818 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
2820 undone == 0) { 2819 undone == 0) {
2821 ioctl_result = 0; /* EOF written succesfully at EOM */ 2820 ioctl_result = 0; /* EOF written successfully at EOM */
2822 if (fileno >= 0) 2821 if (fileno >= 0)
2823 fileno++; 2822 fileno++;
2824 STps->drv_file = fileno; 2823 STps->drv_file = fileno;
@@ -3599,7 +3598,6 @@ static struct st_buffer *
3599 tb->use_sg = max_sg; 3598 tb->use_sg = max_sg;
3600 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg); 3599 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
3601 3600
3602 tb->in_use = 1;
3603 tb->dma = need_dma; 3601 tb->dma = need_dma;
3604 tb->buffer_size = got; 3602 tb->buffer_size = got;
3605 3603
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 411209048d74..05a5cae126ec 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -31,7 +31,6 @@ struct st_request {
31 31
32/* The tape buffer descriptor. */ 32/* The tape buffer descriptor. */
33struct st_buffer { 33struct st_buffer {
34 unsigned char in_use;
35 unsigned char dma; /* DMA-able buffer */ 34 unsigned char dma; /* DMA-able buffer */
36 unsigned char do_dio; /* direct i/o set up? */ 35 unsigned char do_dio; /* direct i/o set up? */
37 int buffer_size; 36 int buffer_size;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
new file mode 100644
index 000000000000..3cf3106a29b8
--- /dev/null
+++ b/drivers/scsi/stex.c
@@ -0,0 +1,1252 @@
1/*
2 * SuperTrak EX Series Storage Controller driver for Linux
3 *
4 * Copyright (C) 2005, 2006 Promise Technology Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Written By:
12 * Ed Lin <promise_linux@promise.com>
13 *
14 * Version: 2.9.0.13
15 *
16 */
17
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23#include <linux/time.h>
24#include <linux/pci.h>
25#include <linux/blkdev.h>
26#include <linux/interrupt.h>
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/spinlock.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <asm/byteorder.h>
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_tcq.h>
38
39#define DRV_NAME "stex"
40#define ST_DRIVER_VERSION "2.9.0.13"
41#define ST_VER_MAJOR 2
42#define ST_VER_MINOR 9
43#define ST_OEM 0
44#define ST_BUILD_VER 13
45
46enum {
47 /* MU register offset */
48 IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */
49 IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */
50 OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */
51 OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */
52 IDBL = 0x20, /* MU_INBOUND_DOORBELL */
53 IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */
54 IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */
55 ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */
56 OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */
57 OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */
58
59 /* MU register value */
60 MU_INBOUND_DOORBELL_HANDSHAKE = 1,
61 MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
62 MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4,
63 MU_INBOUND_DOORBELL_HMUSTOPPED = 8,
64 MU_INBOUND_DOORBELL_RESET = 16,
65
66 MU_OUTBOUND_DOORBELL_HANDSHAKE = 1,
67 MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2,
68 MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4,
69 MU_OUTBOUND_DOORBELL_BUSCHANGE = 8,
70 MU_OUTBOUND_DOORBELL_HASEVENT = 16,
71
72 /* MU status code */
73 MU_STATE_STARTING = 1,
74 MU_STATE_FMU_READY_FOR_HANDSHAKE = 2,
75 MU_STATE_SEND_HANDSHAKE_FRAME = 3,
76 MU_STATE_STARTED = 4,
77 MU_STATE_RESETTING = 5,
78
79 MU_MAX_DELAY_TIME = 240000,
80 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
81 HMU_PARTNER_TYPE = 2,
82
83 /* firmware returned values */
84 SRB_STATUS_SUCCESS = 0x01,
85 SRB_STATUS_ERROR = 0x04,
86 SRB_STATUS_BUSY = 0x05,
87 SRB_STATUS_INVALID_REQUEST = 0x06,
88 SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
89 SRB_SEE_SENSE = 0x80,
90
91 /* task attribute */
92 TASK_ATTRIBUTE_SIMPLE = 0x0,
93 TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
94 TASK_ATTRIBUTE_ORDERED = 0x2,
95 TASK_ATTRIBUTE_ACA = 0x4,
96
97 /* request count, etc. */
98 MU_MAX_REQUEST = 32,
99
100 /* one message wasted, use MU_MAX_REQUEST+1
101 to handle MU_MAX_REQUEST messages */
102 MU_REQ_COUNT = (MU_MAX_REQUEST + 1),
103 MU_STATUS_COUNT = (MU_MAX_REQUEST + 1),
104
105 STEX_CDB_LENGTH = MAX_COMMAND_SIZE,
106 REQ_VARIABLE_LEN = 1024,
107 STATUS_VAR_LEN = 128,
108 ST_CAN_QUEUE = MU_MAX_REQUEST,
109 ST_CMD_PER_LUN = MU_MAX_REQUEST,
110 ST_MAX_SG = 32,
111
112 /* sg flags */
113 SG_CF_EOT = 0x80, /* end of table */
114 SG_CF_64B = 0x40, /* 64 bit item */
115 SG_CF_HOST = 0x20, /* sg in host memory */
116
117 ST_MAX_ARRAY_SUPPORTED = 16,
118 ST_MAX_TARGET_NUM = (ST_MAX_ARRAY_SUPPORTED+1),
119 ST_MAX_LUN_PER_TARGET = 16,
120
121 st_shasta = 0,
122 st_vsc = 1,
123
124 PASSTHRU_REQ_TYPE = 0x00000001,
125 PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
126 ST_INTERNAL_TIMEOUT = 30,
127
128 /* vendor specific commands of Promise */
129 ARRAY_CMD = 0xe0,
130 CONTROLLER_CMD = 0xe1,
131 DEBUGGING_CMD = 0xe2,
132 PASSTHRU_CMD = 0xe3,
133
134 PASSTHRU_GET_ADAPTER = 0x05,
135 PASSTHRU_GET_DRVVER = 0x10,
136 CTLR_POWER_STATE_CHANGE = 0x0e,
137 CTLR_POWER_SAVING = 0x01,
138
139 PASSTHRU_SIGNATURE = 0x4e415041,
140
141 INQUIRY_EVPD = 0x01,
142};
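
(The MU_REQ_COUNT = MU_MAX_REQUEST + 1 sizing is the classic one-empty-slot ring buffer; a minimal sketch of the invariant, illustrative rather than driver code:)

/* With COUNT = N + 1 slots, head == tail can only mean "empty" and
 * (head + 1) % COUNT == tail can only mean "full", so the two states
 * stay distinguishable while all N real messages are in flight. */
static inline int ring_empty(u32 head, u32 tail)
{
	return head == tail;
}

static inline int ring_full(u32 head, u32 tail, u32 count)
{
	return (head + 1) % count == tail;
}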
143
144struct st_sgitem {
145 u8 ctrl; /* SG_CF_xxx */
146 u8 reserved[3];
147 __le32 count;
148 __le32 addr;
149 __le32 addr_hi;
150};
151
152struct st_sgtable {
153 __le16 sg_count;
154 __le16 max_sg_count;
155 __le32 sz_in_byte;
156 struct st_sgitem table[ST_MAX_SG];
157};
158
159struct handshake_frame {
160 __le32 rb_phy; /* request payload queue physical address */
161 __le32 rb_phy_hi;
162 __le16 req_sz; /* size of each request payload */
163 __le16 req_cnt; /* count of reqs the buffer can hold */
164 __le16 status_sz; /* size of each status payload */
165 __le16 status_cnt; /* count of status the buffer can hold */
166 __le32 hosttime; /* seconds from Jan 1, 1970 (GMT) */
167 __le32 hosttime_hi;
168 u8 partner_type; /* who sends this frame */
169 u8 reserved0[7];
170 __le32 partner_ver_major;
171 __le32 partner_ver_minor;
172 __le32 partner_ver_oem;
173 __le32 partner_ver_build;
174 u32 reserved1[4];
175};
176
177struct req_msg {
178 __le16 tag;
179 u8 lun;
180 u8 target;
181 u8 task_attr;
182 u8 task_manage;
183 u8 prd_entry;
 184 u8 payload_sz; /* payload size in 4-byte units */
185 u8 cdb[STEX_CDB_LENGTH];
186 u8 variable[REQ_VARIABLE_LEN];
187};
188
189struct status_msg {
190 __le16 tag;
191 u8 lun;
192 u8 target;
193 u8 srb_status;
194 u8 scsi_status;
195 u8 reserved;
 196 u8 payload_sz; /* payload size in 4-byte units */
197 u8 variable[STATUS_VAR_LEN];
198};
199
200struct ver_info {
201 u32 major;
202 u32 minor;
203 u32 oem;
204 u32 build;
205 u32 reserved[2];
206};
207
208struct st_frame {
209 u32 base[6];
210 u32 rom_addr;
211
212 struct ver_info drv_ver;
213 struct ver_info bios_ver;
214
215 u32 bus;
216 u32 slot;
217 u32 irq_level;
218 u32 irq_vec;
219 u32 id;
220 u32 subid;
221
222 u32 dimm_size;
223 u8 dimm_type;
224 u8 reserved[3];
225
226 u32 channel;
227 u32 reserved1;
228};
229
230struct st_drvver {
231 u32 major;
232 u32 minor;
233 u32 oem;
234 u32 build;
235 u32 signature[2];
236 u8 console_id;
237 u8 host_no;
238 u8 reserved0[2];
239 u32 reserved[3];
240};
241
242#define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg))
243#define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg))
244#define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
245#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + sizeof(struct st_frame))
246
247struct st_ccb {
248 struct req_msg *req;
249 struct scsi_cmnd *cmd;
250
251 void *sense_buffer;
252 unsigned int sense_bufflen;
253 int sg_count;
254
255 u32 req_type;
256 u8 srb_status;
257 u8 scsi_status;
258};
259
260struct st_hba {
261 void __iomem *mmio_base; /* iomapped PCI memory space */
262 void *dma_mem;
263 dma_addr_t dma_handle;
264
265 struct Scsi_Host *host;
266 struct pci_dev *pdev;
267
268 u32 req_head;
269 u32 req_tail;
270 u32 status_head;
271 u32 status_tail;
272
273 struct status_msg *status_buffer;
274 void *copy_buffer; /* temp buffer for driver-handled commands */
275 struct st_ccb ccb[MU_MAX_REQUEST];
276 struct st_ccb *wait_ccb;
277 wait_queue_head_t waitq;
278
279 unsigned int mu_status;
280 int out_req_cnt;
281
282 unsigned int cardtype;
283};
284
285static const char console_inq_page[] =
286{
287 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
288 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */
289 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */
290 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */
291 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */
292 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */
293 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */
294 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
295};
296
297MODULE_AUTHOR("Ed Lin");
298MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
299MODULE_LICENSE("GPL");
300MODULE_VERSION(ST_DRIVER_VERSION);
301
302static void stex_gettime(__le32 *time)
303{
304 struct timeval tv;
305 do_gettimeofday(&tv);
306
307 *time = cpu_to_le32(tv.tv_sec & 0xffffffff);
308 *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
309}
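
(The split shift in stex_gettime is deliberate: (tv.tv_sec >> 16) >> 16 yields the high half even where tv_sec is a 32-bit type, and on such platforms simply stores 0 for the high word, whereas a single >> 32 would be undefined behaviour in C when the shift count equals the operand width.)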
310
311static struct status_msg *stex_get_status(struct st_hba *hba)
312{
313 struct status_msg *status =
314 hba->status_buffer + hba->status_tail;
315
316 ++hba->status_tail;
317 hba->status_tail %= MU_STATUS_COUNT;
318
319 return status;
320}
321
322static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
323{
324 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
325
326 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
327 cmd->sense_buffer[2] = sk;
328 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
329 cmd->sense_buffer[12] = asc;
330 cmd->sense_buffer[13] = ascq;
331}
332
333static void stex_invalid_field(struct scsi_cmnd *cmd,
334 void (*done)(struct scsi_cmnd *))
335{
336 /* "Invalid field in cbd" */
337 stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
338 done(cmd);
339}
340
341static struct req_msg *stex_alloc_req(struct st_hba *hba)
342{
343 struct req_msg *req = ((struct req_msg *)hba->dma_mem) +
344 hba->req_head;
345
346 ++hba->req_head;
347 hba->req_head %= MU_REQ_COUNT;
348
349 return req;
350}
351
352static int stex_map_sg(struct st_hba *hba,
353 struct req_msg *req, struct st_ccb *ccb)
354{
355 struct pci_dev *pdev = hba->pdev;
356 struct scsi_cmnd *cmd;
357 dma_addr_t dma_handle;
358 struct scatterlist *src;
359 struct st_sgtable *dst;
360 int i;
361
362 cmd = ccb->cmd;
363 dst = (struct st_sgtable *)req->variable;
364 dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
365 dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);
366
367 if (cmd->use_sg) {
368 int n_elem;
369
370 src = (struct scatterlist *) cmd->request_buffer;
371 n_elem = pci_map_sg(pdev, src,
372 cmd->use_sg, cmd->sc_data_direction);
373 if (n_elem <= 0)
374 return -EIO;
375
376 ccb->sg_count = n_elem;
377 dst->sg_count = cpu_to_le16((u16)n_elem);
378
379 for (i = 0; i < n_elem; i++, src++) {
380 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
381 dst->table[i].addr =
382 cpu_to_le32(sg_dma_address(src) & 0xffffffff);
383 dst->table[i].addr_hi =
384 cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
385 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
386 }
387 dst->table[--i].ctrl |= SG_CF_EOT;
388 return 0;
389 }
390
391 dma_handle = pci_map_single(pdev, cmd->request_buffer,
392 cmd->request_bufflen, cmd->sc_data_direction);
393 cmd->SCp.dma_handle = dma_handle;
394
395 ccb->sg_count = 1;
396 dst->sg_count = cpu_to_le16(1);
397 dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
398 dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
399 dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
400 dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
401
402 return 0;
403}
404
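/*
 * Copy an internally generated response (inquiry data, mode page,
 * controller info) into the command's data buffer, walking the
 * scatter-gather list one kmap'ed chunk at a time when cmd->use_sg
 * is set.  *count is clamped to the buffer length and, if a kmap
 * fails, reduced to the number of bytes actually copied.
 */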
405static void stex_internal_copy(struct scsi_cmnd *cmd,
406 const void *src, size_t *count, int sg_count)
407{
408 size_t lcount;
409 size_t len;
410 void *s, *d, *base = NULL;
411 if (*count > cmd->request_bufflen)
412 *count = cmd->request_bufflen;
413 lcount = *count;
414 while (lcount) {
415 len = lcount;
416 s = (void *)src;
417 if (cmd->use_sg) {
418 size_t offset = *count - lcount;
419 s += offset;
420 base = scsi_kmap_atomic_sg(cmd->request_buffer,
421 sg_count, &offset, &len);
422 if (base == NULL) {
423 *count -= lcount;
424 return;
425 }
426 d = base + offset;
427 } else
428 d = cmd->request_buffer;
429
430 memcpy(d, s, len);
431
432 lcount -= len;
433 if (cmd->use_sg)
434 scsi_kunmap_atomic_sg(base);
435 }
436}
437
438static int stex_direct_copy(struct scsi_cmnd *cmd,
439 const void *src, size_t count)
440{
441 struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
442 size_t cp_len = count;
443 int n_elem = 0;
444
445 if (cmd->use_sg) {
446 n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
447 cmd->use_sg, cmd->sc_data_direction);
448 if (n_elem <= 0)
449 return 0;
450 }
451
452 stex_internal_copy(cmd, src, &cp_len, n_elem);
453
454 if (cmd->use_sg)
455 pci_unmap_sg(hba->pdev, cmd->request_buffer,
456 cmd->use_sg, cmd->sc_data_direction);
457 return cp_len == count;
458}
459
460static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
461{
462 struct st_frame *p;
463 size_t count = sizeof(struct st_frame);
464
465 p = hba->copy_buffer;
466 memset(p->base, 0, sizeof(u32)*6);
467 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
468 p->rom_addr = 0;
469
470 p->drv_ver.major = ST_VER_MAJOR;
471 p->drv_ver.minor = ST_VER_MINOR;
472 p->drv_ver.oem = ST_OEM;
473 p->drv_ver.build = ST_BUILD_VER;
474
475 p->bus = hba->pdev->bus->number;
476 p->slot = hba->pdev->devfn;
477 p->irq_level = 0;
478 p->irq_vec = hba->pdev->irq;
479 p->id = hba->pdev->vendor << 16 | hba->pdev->device;
480 p->subid =
481 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
482
483 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count);
484}
485
486static void
487stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
488{
489 req->tag = cpu_to_le16(tag);
490 req->task_attr = TASK_ATTRIBUTE_SIMPLE;
491 req->task_manage = 0; /* not supported yet */
492 req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32));
493
494 hba->ccb[tag].req = req;
495 hba->out_req_cnt++;
496
497 writel(hba->req_head, hba->mmio_base + IMR0);
498 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
499 readl(hba->mmio_base + IDBL); /* flush */
500}
501
502static int
503stex_slave_alloc(struct scsi_device *sdev)
504{
505 /* Cheat: usually extracted from Inquiry data */
506 sdev->tagged_supported = 1;
507
508 scsi_activate_tcq(sdev, sdev->host->can_queue);
509
510 return 0;
511}
512
513static int
514stex_slave_config(struct scsi_device *sdev)
515{
516 sdev->use_10_for_rw = 1;
517 sdev->use_10_for_ms = 1;
518 sdev->timeout = 60 * HZ;
519 sdev->tagged_supported = 1;
520
521 return 0;
522}
523
524static void
525stex_slave_destroy(struct scsi_device *sdev)
526{
527 scsi_deactivate_tcq(sdev, 1);
528}
529
530static int
531stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
532{
533 struct st_hba *hba;
534 struct Scsi_Host *host;
535 unsigned int id, lun;
536 struct req_msg *req;
537 u16 tag;
538 host = cmd->device->host;
539 id = cmd->device->id;
540 lun = cmd->device->channel; /* workaround for firmware lun issue */
541 hba = (struct st_hba *) &host->hostdata[0];
542
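	/* A few commands are handled by the driver itself: a cached-page
	   MODE SENSE(10), INQUIRY for the virtual console device at id
	   ST_MAX_ARRAY_SUPPORTED, and the PASSTHRU driver-version query.
	   Everything else falls through to the firmware path below. */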
543 switch (cmd->cmnd[0]) {
544 case MODE_SENSE_10:
545 {
546 static char ms10_caching_page[12] =
547 { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
548 unsigned char page;
549 page = cmd->cmnd[2] & 0x3f;
550 if (page == 0x8 || page == 0x3f) {
551 stex_direct_copy(cmd, ms10_caching_page,
552 sizeof(ms10_caching_page));
553 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
554 done(cmd);
555 } else
556 stex_invalid_field(cmd, done);
557 return 0;
558 }
559 case INQUIRY:
560 if (id != ST_MAX_ARRAY_SUPPORTED)
561 break;
562 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
563 stex_direct_copy(cmd, console_inq_page,
564 sizeof(console_inq_page));
565 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
566 done(cmd);
567 } else
568 stex_invalid_field(cmd, done);
569 return 0;
570 case PASSTHRU_CMD:
571 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
572 struct st_drvver ver;
573 ver.major = ST_VER_MAJOR;
574 ver.minor = ST_VER_MINOR;
575 ver.oem = ST_OEM;
576 ver.build = ST_BUILD_VER;
577 ver.signature[0] = PASSTHRU_SIGNATURE;
578 ver.console_id = ST_MAX_ARRAY_SUPPORTED;
579 ver.host_no = hba->host->host_no;
580 cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ?
581 DID_OK << 16 | COMMAND_COMPLETE << 8 :
582 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
583 done(cmd);
584 return 0;
585 }
586 default:
587 break;
588 }
589
590 cmd->scsi_done = done;
591
592 tag = cmd->request->tag;
593
594 if (unlikely(tag >= host->can_queue))
595 return SCSI_MLQUEUE_HOST_BUSY;
596
597 req = stex_alloc_req(hba);
598 req->lun = lun;
599 req->target = id;
600
601 /* cdb */
602 memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
603
604 hba->ccb[tag].cmd = cmd;
605 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
606 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
607 hba->ccb[tag].req_type = 0;
608
609 if (cmd->sc_data_direction != DMA_NONE)
610 stex_map_sg(hba, req, &hba->ccb[tag]);
611
612 stex_send_cmd(hba, req, tag);
613 return 0;
614}
615
616static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
617{
618 if (cmd->sc_data_direction != DMA_NONE) {
619 if (cmd->use_sg)
620 pci_unmap_sg(hba->pdev, cmd->request_buffer,
621 cmd->use_sg, cmd->sc_data_direction);
622 else
623 pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
624 cmd->request_bufflen, cmd->sc_data_direction);
625 }
626}
627
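/*
 * Translate the firmware's srb_status/scsi_status pair into a SCSI
 * midlayer result: successful SRBs pass the SCSI status through,
 * SRB_SEE_SENSE becomes a check condition with sense data, and the
 * remaining SRB failures map onto DID_* host byte codes.
 */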
628static void stex_scsi_done(struct st_ccb *ccb)
629{
630 struct scsi_cmnd *cmd = ccb->cmd;
631 int result;
632
633 if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
634 result = ccb->scsi_status;
635 switch (ccb->scsi_status) {
636 case SAM_STAT_GOOD:
637 result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
638 break;
639 case SAM_STAT_CHECK_CONDITION:
640 result |= DRIVER_SENSE << 24;
641 break;
642 case SAM_STAT_BUSY:
643 result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
644 break;
645 default:
646 result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
647 break;
648 }
649 }
650 else if (ccb->srb_status & SRB_SEE_SENSE)
651 result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
652 else switch (ccb->srb_status) {
653 case SRB_STATUS_SELECTION_TIMEOUT:
654 result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
655 break;
656 case SRB_STATUS_BUSY:
657 result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
658 break;
659 case SRB_STATUS_INVALID_REQUEST:
660 case SRB_STATUS_ERROR:
661 default:
662 result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
663 break;
664 }
665
666 cmd->result = result;
667 cmd->scsi_done(cmd);
668}
669
670static void stex_copy_data(struct st_ccb *ccb,
671 struct status_msg *resp, unsigned int variable)
672{
673 size_t count = variable;
674 if (resp->scsi_status != SAM_STAT_GOOD) {
675 if (ccb->sense_buffer != NULL)
676 memcpy(ccb->sense_buffer, resp->variable,
677 min(variable, ccb->sense_bufflen));
678 return;
679 }
680
681 if (ccb->cmd == NULL)
682 return;
683 stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count);
684}
685
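/*
 * Outbound doorbell handler: drain the status ring between our
 * status_tail and the firmware's status_head (read from OMR1),
 * complete the corresponding ccbs, then acknowledge by writing the
 * new head back to IMR1.
 */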
686static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
687{
688 void __iomem *base = hba->mmio_base;
689 struct status_msg *resp;
690 struct st_ccb *ccb;
691 unsigned int size;
692 u16 tag;
693
694 if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))
695 return;
696
697 /* status payloads */
698 hba->status_head = readl(base + OMR1);
699 if (unlikely(hba->status_head >= MU_STATUS_COUNT)) {
700 printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
701 pci_name(hba->pdev));
702 return;
703 }
704
705 if (unlikely(hba->mu_status != MU_STATE_STARTED ||
706 hba->out_req_cnt <= 0)) {
707 hba->status_tail = hba->status_head;
708 goto update_status;
709 }
710
711 while (hba->status_tail != hba->status_head) {
712 resp = stex_get_status(hba);
713 tag = le16_to_cpu(resp->tag);
714 if (unlikely(tag >= hba->host->can_queue)) {
715 printk(KERN_WARNING DRV_NAME
716 "(%s): invalid tag\n", pci_name(hba->pdev));
717 continue;
718 }
719
720 ccb = &hba->ccb[tag];
721 if (hba->wait_ccb == ccb)
722 hba->wait_ccb = NULL;
723 if (unlikely(ccb->req == NULL)) {
724 printk(KERN_WARNING DRV_NAME
725 "(%s): lagging req\n", pci_name(hba->pdev));
726 continue;
727 }
728
729 size = resp->payload_sz * sizeof(u32); /* payload size */
730 if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
731 size > sizeof(*resp))) {
732 printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
733 pci_name(hba->pdev));
734 } else {
735 size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
736 if (size)
737 stex_copy_data(ccb, resp, size);
738 }
739
740 ccb->srb_status = resp->srb_status;
741 ccb->scsi_status = resp->scsi_status;
742
743 if (likely(ccb->cmd != NULL)) {
744 if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
745 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
746 stex_controller_info(hba, ccb);
747 stex_unmap_sg(hba, ccb->cmd);
748 stex_scsi_done(ccb);
749 hba->out_req_cnt--;
750 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
751 hba->out_req_cnt--;
752 if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) {
753 ccb->req_type = 0;
754 continue;
755 }
756 ccb->req_type = 0;
757 if (waitqueue_active(&hba->waitq))
758 wake_up(&hba->waitq);
759 }
760 }
761
762update_status:
763 writel(hba->status_head, base + IMR1);
764 readl(base + IMR1); /* flush */
765}
766
767static irqreturn_t stex_intr(int irq, void *__hba, struct pt_regs *regs)
768{
769 struct st_hba *hba = __hba;
770 void __iomem *base = hba->mmio_base;
771 u32 data;
772 unsigned long flags;
773 int handled = 0;
774
775 spin_lock_irqsave(hba->host->host_lock, flags);
776
777 data = readl(base + ODBL);
778
779 if (data && data != 0xffffffff) {
780 /* clear the interrupt */
781 writel(data, base + ODBL);
782 readl(base + ODBL); /* flush */
783 stex_mu_intr(hba, data);
784 handled = 1;
785 }
786
787 spin_unlock_irqrestore(hba->host->host_lock, flags);
788
789 return IRQ_RETVAL(handled);
790}
791
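/*
 * Bring the message unit up: wait for the firmware's handshake
 * signature in OMR0, publish the ring geometry and host time through
 * a handshake_frame placed in the DMA area, ring the inbound
 * doorbell, then wait for the signature again before clearing the
 * mailboxes and marking the unit MU_STATE_STARTED.
 */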
792static int stex_handshake(struct st_hba *hba)
793{
794 void __iomem *base = hba->mmio_base;
795 struct handshake_frame *h;
796 dma_addr_t status_phys;
797 int i;
798
799 if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
800 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
801 readl(base + IDBL);
802 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
803 && i < MU_MAX_DELAY_TIME; i++) {
804 rmb();
805 msleep(1);
806 }
807
808 if (i == MU_MAX_DELAY_TIME) {
809 printk(KERN_ERR DRV_NAME
810 "(%s): no handshake signature\n",
811 pci_name(hba->pdev));
812 return -1;
813 }
814 }
815
816 udelay(10);
817
818 h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
819 h->rb_phy = cpu_to_le32(hba->dma_handle);
820 h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
821 h->req_sz = cpu_to_le16(sizeof(struct req_msg));
822 h->req_cnt = cpu_to_le16(MU_REQ_COUNT);
823 h->status_sz = cpu_to_le16(sizeof(struct status_msg));
824 h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
825 stex_gettime(&h->hosttime);
826 h->partner_type = HMU_PARTNER_TYPE;
827
828 status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
829 writel(status_phys, base + IMR0);
830 readl(base + IMR0);
831 writel((status_phys >> 16) >> 16, base + IMR1);
832 readl(base + IMR1);
833
834 writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
835 readl(base + OMR0);
836 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
837 readl(base + IDBL); /* flush */
838
839 udelay(10);
840 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
841 && i < MU_MAX_DELAY_TIME; i++) {
842 rmb();
843 msleep(1);
844 }
845
846 if (i == MU_MAX_DELAY_TIME) {
847 printk(KERN_ERR DRV_NAME
848 "(%s): no signature after handshake frame\n",
849 pci_name(hba->pdev));
850 return -1;
851 }
852
853 writel(0, base + IMR0);
854 readl(base + IMR0);
855 writel(0, base + OMR0);
856 readl(base + OMR0);
857 writel(0, base + IMR1);
858 readl(base + IMR1);
859 writel(0, base + OMR1);
860 readl(base + OMR1); /* flush */
861 hba->mu_status = MU_STATE_STARTED;
862 return 0;
863}
864
865static int stex_abort(struct scsi_cmnd *cmd)
866{
867 struct Scsi_Host *host = cmd->device->host;
868 struct st_hba *hba = (struct st_hba *)host->hostdata;
869 u16 tag = cmd->request->tag;
870 void __iomem *base;
871 u32 data;
872 int result = SUCCESS;
873 unsigned long flags;
874 base = hba->mmio_base;
875 spin_lock_irqsave(host->host_lock, flags);
876 if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
877 hba->wait_ccb = &hba->ccb[tag];
878 else {
879 for (tag = 0; tag < host->can_queue; tag++)
880 if (hba->ccb[tag].cmd == cmd) {
881 hba->wait_ccb = &hba->ccb[tag];
882 break;
883 }
884 if (tag >= host->can_queue)
885 goto out;
886 }
887
888 data = readl(base + ODBL);
889 if (data == 0 || data == 0xffffffff)
890 goto fail_out;
891
892 writel(data, base + ODBL);
893 readl(base + ODBL); /* flush */
894
895 stex_mu_intr(hba, data);
896
897 if (hba->wait_ccb == NULL) {
898 printk(KERN_WARNING DRV_NAME
899 "(%s): lost interrupt\n", pci_name(hba->pdev));
900 goto out;
901 }
902
903fail_out:
904 stex_unmap_sg(hba, cmd);
905 hba->wait_ccb->req = NULL; /* ignore any later completion of this req */
906 hba->wait_ccb = NULL;
907 result = FAILED;
908out:
909 spin_unlock_irqrestore(host->host_lock, flags);
910 return result;
911}
912
913static void stex_hard_reset(struct st_hba *hba)
914{
915 struct pci_bus *bus;
916 int i;
917 u16 pci_cmd;
918 u8 pci_bctl;
919
920 for (i = 0; i < 16; i++)
921 pci_read_config_dword(hba->pdev, i * 4,
922 &hba->pdev->saved_config_space[i]);
923
924 /* Reset the secondary bus. Our controller (MU/ATU) is the only device on
925    the secondary bus. Consult the Intel 80331/3 developer's manual for details */
926 bus = hba->pdev->bus;
927 pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
928 pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
929 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
930 msleep(1);
931 pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
932 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
933
934 for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
935 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
936 if (pci_cmd & PCI_COMMAND_MASTER)
937 break;
938 msleep(1);
939 }
940
941 ssleep(5);
942 for (i = 0; i < 16; i++)
943 pci_write_config_dword(hba->pdev, i * 4,
944 hba->pdev->saved_config_space[i]);
945}
946
947static int stex_reset(struct scsi_cmnd *cmd)
948{
949 struct st_hba *hba;
950 unsigned long flags;
951 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
952
953 hba->mu_status = MU_STATE_RESETTING;
954
955 if (hba->cardtype == st_shasta)
956 stex_hard_reset(hba);
957
958 if (stex_handshake(hba)) {
959 printk(KERN_WARNING DRV_NAME
960 "(%s): resetting: handshake failed\n",
961 pci_name(hba->pdev));
962 return FAILED;
963 }
964 spin_lock_irqsave(hba->host->host_lock, flags);
965 hba->req_head = 0;
966 hba->req_tail = 0;
967 hba->status_head = 0;
968 hba->status_tail = 0;
969 hba->out_req_cnt = 0;
970 spin_unlock_irqrestore(hba->host->host_lock, flags);
971
972 return SUCCESS;
973}
974
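/* Report a 255-head, 63-sector geometry, or 64x32 for arrays under
   1 GB (0x200000 512-byte sectors), presumably to keep the cylinder
   count within what legacy BIOS geometries expect. */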
975static int stex_biosparam(struct scsi_device *sdev,
976 struct block_device *bdev, sector_t capacity, int geom[])
977{
978 int heads = 255, sectors = 63, cylinders;
979
980 if (capacity < 0x200000) {
981 heads = 64;
982 sectors = 32;
983 }
984
985 cylinders = sector_div(capacity, heads * sectors);
986
987 geom[0] = heads;
988 geom[1] = sectors;
989 geom[2] = cylinders;
990
991 return 0;
992}
993
994static struct scsi_host_template driver_template = {
995 .module = THIS_MODULE,
996 .name = DRV_NAME,
997 .proc_name = DRV_NAME,
998 .bios_param = stex_biosparam,
999 .queuecommand = stex_queuecommand,
1000 .slave_alloc = stex_slave_alloc,
1001 .slave_configure = stex_slave_config,
1002 .slave_destroy = stex_slave_destroy,
1003 .eh_abort_handler = stex_abort,
1004 .eh_host_reset_handler = stex_reset,
1005 .can_queue = ST_CAN_QUEUE,
1006 .this_id = -1,
1007 .sg_tablesize = ST_MAX_SG,
1008 .cmd_per_lun = ST_CMD_PER_LUN,
1009};
1010
1011static int stex_set_dma_mask(struct pci_dev *pdev)
1012{
1013 int ret;
1014 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
1015 && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1016 return 0;
1017 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1018 if (!ret)
1019 ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1020 return ret;
1021}
1022
1023static int __devinit
1024stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1025{
1026 struct st_hba *hba;
1027 struct Scsi_Host *host;
1028 int err;
1029
1030 err = pci_enable_device(pdev);
1031 if (err)
1032 return err;
1033
1034 pci_set_master(pdev);
1035
1036 host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
1037
1038 if (!host) {
1039 printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
1040 pci_name(pdev));
1041 err = -ENOMEM;
1042 goto out_disable;
1043 }
1044
1045 hba = (struct st_hba *)host->hostdata;
1046 memset(hba, 0, sizeof(struct st_hba));
1047
1048 err = pci_request_regions(pdev, DRV_NAME);
1049 if (err < 0) {
1050 printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
1051 pci_name(pdev));
1052 goto out_scsi_host_put;
1053 }
1054
1055 hba->mmio_base = ioremap(pci_resource_start(pdev, 0),
1056 pci_resource_len(pdev, 0));
1057 if (!hba->mmio_base) {
1058 printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
1059 pci_name(pdev));
1060 err = -ENOMEM;
1061 goto out_release_regions;
1062 }
1063
1064 err = stex_set_dma_mask(pdev);
1065 if (err) {
1066 printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
1067 pci_name(pdev));
1068 goto out_iounmap;
1069 }
1070
1071 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1072 STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
1073 if (!hba->dma_mem) {
1074 err = -ENOMEM;
1075 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
1076 pci_name(pdev));
1077 goto out_iounmap;
1078 }
1079
1080 hba->status_buffer =
1081 (struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
1082 hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
1083 hba->mu_status = MU_STATE_STARTING;
1084
1085 hba->cardtype = (unsigned int) id->driver_data;
1086
1087 /* firmware uses an id/lun pair to address a logical drive, but lun
1088    is always 0 if CONFIG_SCSI_MULTI_LUN is not configured, so we use
1089    the channel number to map luns here */
1090 host->max_channel = ST_MAX_LUN_PER_TARGET - 1;
1091 host->max_id = ST_MAX_TARGET_NUM;
1092 host->max_lun = 1;
1093 host->unique_id = host->host_no;
1094 host->max_cmd_len = STEX_CDB_LENGTH;
1095
1096 hba->host = host;
1097 hba->pdev = pdev;
1098 init_waitqueue_head(&hba->waitq);
1099
1100 err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba);
1101 if (err) {
1102 printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
1103 pci_name(pdev));
1104 goto out_pci_free;
1105 }
1106
1107 err = stex_handshake(hba);
1108 if (err)
1109 goto out_free_irq;
1110
1111 err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
1112 if (err) {
1113 printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
1114 pci_name(pdev));
1115 goto out_free_irq;
1116 }
1117
1118 pci_set_drvdata(pdev, hba);
1119
1120 err = scsi_add_host(host, &pdev->dev);
1121 if (err) {
1122 printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
1123 pci_name(pdev));
1124 goto out_free_irq;
1125 }
1126
1127 scsi_scan_host(host);
1128
1129 return 0;
1130
1131out_free_irq:
1132 free_irq(pdev->irq, hba);
1133out_pci_free:
1134 dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
1135 hba->dma_mem, hba->dma_handle);
1136out_iounmap:
1137 iounmap(hba->mmio_base);
1138out_release_regions:
1139 pci_release_regions(pdev);
1140out_scsi_host_put:
1141 scsi_host_put(host);
1142out_disable:
1143 pci_disable_device(pdev);
1144
1145 return err;
1146}
1147
1148static void stex_hba_stop(struct st_hba *hba)
1149{
1150 struct req_msg *req;
1151 unsigned long flags;
1152 unsigned long before;
1153 u16 tag = 0;
1154
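	/* Queue an internal power-saving request on tag 0 (presumably safe
	   here, as the host has been stopped and no SCSI commands are
	   outstanding) and poll until the interrupt handler clears
	   PASSTHRU_REQ_TYPE or ST_INTERNAL_TIMEOUT expires. */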
1155 spin_lock_irqsave(hba->host->host_lock, flags);
1156 req = stex_alloc_req(hba);
1157 memset(req->cdb, 0, STEX_CDB_LENGTH);
1158
1159 req->cdb[0] = CONTROLLER_CMD;
1160 req->cdb[1] = CTLR_POWER_STATE_CHANGE;
1161 req->cdb[2] = CTLR_POWER_SAVING;
1162
1163 hba->ccb[tag].cmd = NULL;
1164 hba->ccb[tag].sg_count = 0;
1165 hba->ccb[tag].sense_bufflen = 0;
1166 hba->ccb[tag].sense_buffer = NULL;
1167 hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE;
1168
1169 stex_send_cmd(hba, req, tag);
1170 spin_unlock_irqrestore(hba->host->host_lock, flags);
1171
1172 before = jiffies;
1173 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1174 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
1175 return;
1176 msleep(10);
1177 }
1178}
1179
1180static void stex_hba_free(struct st_hba *hba)
1181{
1182 free_irq(hba->pdev->irq, hba);
1183
1184 iounmap(hba->mmio_base);
1185
1186 pci_release_regions(hba->pdev);
1187
1188 dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
1189 hba->dma_mem, hba->dma_handle);
1190}
1191
1192static void stex_remove(struct pci_dev *pdev)
1193{
1194 struct st_hba *hba = pci_get_drvdata(pdev);
1195
1196 scsi_remove_host(hba->host);
1197
1198 pci_set_drvdata(pdev, NULL);
1199
1200 stex_hba_stop(hba);
1201
1202 stex_hba_free(hba);
1203
1204 scsi_host_put(hba->host);
1205
1206 pci_disable_device(pdev);
1207}
1208
1209static void stex_shutdown(struct pci_dev *pdev)
1210{
1211 struct st_hba *hba = pci_get_drvdata(pdev);
1212
1213 stex_hba_stop(hba);
1214}
1215
1216static struct pci_device_id stex_pci_tbl[] = {
1217 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1218 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1219 { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1220 { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1221 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1222 { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1223 { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1224 { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1225 { } /* terminate list */
1226};
1227MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
1228
1229static struct pci_driver stex_pci_driver = {
1230 .name = DRV_NAME,
1231 .id_table = stex_pci_tbl,
1232 .probe = stex_probe,
1233 .remove = __devexit_p(stex_remove),
1234 .shutdown = stex_shutdown,
1235};
1236
1237static int __init stex_init(void)
1238{
1239 printk(KERN_INFO DRV_NAME
1240 ": Promise SuperTrak EX Driver version: %s\n",
1241 ST_DRIVER_VERSION);
1242
1243 return pci_register_driver(&stex_pci_driver);
1244}
1245
1246static void __exit stex_exit(void)
1247{
1248 pci_unregister_driver(&stex_pci_driver);
1249}
1250
1251module_init(stex_init);
1252module_exit(stex_exit);
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 25cced91c8a6..2f8073b73bf3 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
 	 */
 
 	if (cmd->use_sg) {
-		cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
 		cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
@@ -535,7 +535,6 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
 
 }
 
-#include <linux/config.h>
 #include <linux/delay.h>
 
 #if 1
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index cc990bed9683..6b60536ac92b 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -97,7 +97,7 @@ int sun3x_esp_detect(struct scsi_host_template *tpnt)
 	esp->esp_command_dvma = dvma_vtob((unsigned long)esp->esp_command);
 
 	esp->irq = 2;
-	if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
+	if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
 			"SUN3X SCSI", esp->ehost)) {
 		esp_deallocate(esp);
 		return 0;
@@ -332,11 +332,11 @@ static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
 	struct scatterlist *sg = sp->SCp.buffer;
 
 	while (sz >= 0) {
-		sg[sz].dvma_address = dvma_map((unsigned long)page_address(sg[sz].page) +
+		sg[sz].dma_address = dvma_map((unsigned long)page_address(sg[sz].page) +
 					       sg[sz].offset, sg[sz].length);
 		sz--;
 	}
-	sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dvma_address);
+	sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address);
 }
341 341
342static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 342static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
@@ -347,17 +347,17 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
 static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
 {
 	int sz = sp->use_sg - 1;
-	struct scatterlist *sg = (struct scatterlist *)sp->buffer;
+	struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
 
 	while(sz >= 0) {
-		dvma_unmap((char *)sg[sz].dvma_address);
+		dvma_unmap((char *)sg[sz].dma_address);
 		sz--;
 	}
 }
 
 static void dma_advance_sg (Scsi_Cmnd *sp)
 {
-	sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dvma_address);
+	sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address);
 }
 
 static int sun3x_esp_release(struct Scsi_Host *instance)
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
index 481103769729..7519728dfc38 100644
--- a/drivers/scsi/sym53c8xx_2/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -40,7 +40,6 @@
 #ifndef SYM53C8XX_H
 #define SYM53C8XX_H
 
-#include <linux/config.h>
 
 /*
  * DMA addressing mode.
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index ea82d3df63af..739d3ef46a40 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1547,7 +1547,7 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
 	 * If we synchonize the C code with SCRIPTS on interrupt,
 	 * we do not want to share the INTR line at all.
 	 */
-	if (request_irq(pdev->irq, sym53c8xx_intr, SA_SHIRQ, NAME53C8XX, np)) {
+	if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, np)) {
 		printf_err("%s: request irq %d failure\n",
 			sym_name(np), pdev->irq);
 		goto attach_failed;
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index a446cda3f64c..e022d3c71b59 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -40,7 +40,6 @@
 #ifndef SYM_GLUE_H
 #define SYM_GLUE_H
 
-#include <linux/config.h>
 #include <linux/delay.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index a24f661b0270..2df6747cb76f 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -260,7 +260,7 @@ found:
     instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
 
     if (instance->irq != SCSI_IRQ_NONE)
-	if (request_irq(instance->irq, t128_intr, SA_INTERRUPT, "t128", instance)) {
+	if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", instance)) {
 	    printk("scsi%d : IRQ%d not free, interrupts disabled\n",
 		instance->host_no, instance->irq);
 	    instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 91322aff241d..9404ff3d4c79 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -218,7 +218,6 @@
 #endif
 #define DCBDEBUG1(x) C_NOP
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/signal.h>
@@ -2585,7 +2584,7 @@ static int __devinit dc390_probe_one(struct pci_dev *pdev,
 	/* Reset Pending INT */
 	DC390_read8_(INT_Status, io_port);
 
-	if (request_irq(pdev->irq, do_DC390_Interrupt, SA_SHIRQ,
+	if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED,
 			"tmscsim", pACB)) {
 		printk(KERN_ERR "DC390: register IRQ error!\n");
 		goto out_release_region;
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index d4495272fb40..9b66fa8d38d9 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -9,7 +9,6 @@
 #define _TMSCSIM_H
 
 #include <linux/types.h>
-#include <linux/config.h>
 
 #define SCSI_IRQ_NONE 255
 
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 33cd90fc657b..57449611e714 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -405,7 +405,6 @@
  * the driver sets host->wish_block = TRUE for all ISA boards.
  */
 
-#include <linux/config.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/ioport.h>
@@ -873,7 +872,7 @@ static int port_detect \
 
    /* Board detected, allocate its IRQ */
    if (request_irq(irq, do_interrupt_handler,
-              SA_INTERRUPT | ((subversion == ESA) ? SA_SHIRQ : 0),
+              IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
               driver_name, (void *) &sha[j])) {
       printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
       goto freelock;
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index e681681ab7a2..0372aa9fa190 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -196,8 +196,8 @@ struct mscp {
   u32 sense_data PACKED;
   /* The following fields are for software only.  They are included in
      the MSCP structure because they are associated with SCSI requests.  */
-  void (*done)(Scsi_Cmnd *);
-  Scsi_Cmnd *SCint;
+  void (*done) (struct scsi_cmnd *);
+  struct scsi_cmnd *SCint;
   ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
 };
 
@@ -289,7 +289,7 @@ static const unsigned short ultrastor_ports_14f[] = {
 
 static void ultrastor_interrupt(int, void *, struct pt_regs *);
 static irqreturn_t do_ultrastor_interrupt(int, void *, struct pt_regs *);
-static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt);
+static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt);
 
 
 /* Always called with host lock held */
@@ -673,7 +673,7 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
     return buf;
 }
 
-static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt)
+static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
 {
 	struct scatterlist *sl;
 	long transfer_length = 0;
@@ -694,7 +694,8 @@ static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt)
 	mscp->transfer_data_length = transfer_length;
 }
 
-static int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
+				void (*done) (struct scsi_cmnd *))
 {
     struct mscp *my_mscp;
 #if ULTRASTOR_MAX_CMDS > 1
@@ -833,7 +834,7 @@ retry:
 
  */
 
-static int ultrastor_abort(Scsi_Cmnd *SCpnt)
+static int ultrastor_abort(struct scsi_cmnd *SCpnt)
 {
 #if ULTRASTOR_DEBUG & UD_ABORT
     char out[108];
@@ -843,7 +844,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt)
     unsigned int mscp_index;
     unsigned char old_aborted;
     unsigned long flags;
-    void (*done)(Scsi_Cmnd *);
+    void (*done)(struct scsi_cmnd *);
     struct Scsi_Host *host = SCpnt->device->host;
 
     if(config.slot)
@@ -960,7 +961,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt)
     return SUCCESS;
 }
 
-static int ultrastor_host_reset(Scsi_Cmnd * SCpnt)
+static int ultrastor_host_reset(struct scsi_cmnd * SCpnt)
 {
     unsigned long flags;
     int i;
@@ -1045,8 +1046,8 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
     unsigned int mscp_index;
 #endif
     struct mscp *mscp;
-    void (*done)(Scsi_Cmnd *);
-    Scsi_Cmnd *SCtmp;
+    void (*done) (struct scsi_cmnd *);
+    struct scsi_cmnd *SCtmp;
 
 #if ULTRASTOR_MAX_CMDS == 1
     mscp = &config.mscp[0];
@@ -1079,7 +1080,7 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	return;
     }
     if (icm_status == 3) {
-	void (*done)(Scsi_Cmnd *) = mscp->done;
+	void (*done)(struct scsi_cmnd *) = mscp->done;
 	if (done) {
 	    mscp->done = NULL;
 	    mscp->SCint->result = DID_ABORT << 16;
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
index da759a11deff..a692905f95f7 100644
--- a/drivers/scsi/ultrastor.h
+++ b/drivers/scsi/ultrastor.h
@@ -14,11 +14,13 @@
 #define _ULTRASTOR_H
 
 static int ultrastor_detect(struct scsi_host_template *);
-static const char *ultrastor_info(struct Scsi_Host * shpnt);
-static int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-static int ultrastor_abort(Scsi_Cmnd *);
-static int ultrastor_host_reset(Scsi_Cmnd *);
-static int ultrastor_biosparam(struct scsi_device *, struct block_device *, sector_t, int *);
+static const char *ultrastor_info(struct Scsi_Host *shpnt);
+static int ultrastor_queuecommand(struct scsi_cmnd *,
+				void (*done)(struct scsi_cmnd *));
+static int ultrastor_abort(struct scsi_cmnd *);
+static int ultrastor_host_reset(struct scsi_cmnd *);
+static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
+			       sector_t, int *);
 
 
 #define ULTRASTOR_14F_MAX_SG 16
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 27307fe5a4c8..2083454db511 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -71,7 +71,6 @@
  * Richard Hirst <richard@sleepie.demon.co.uk>  August 2000
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 
 #include <linux/sched.h>
@@ -374,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
 	 */
 
 	if (cmd->use_sg) {
-		cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
 		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
 		    cmd->SCp.buffer->offset;
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 193ec517d252..edcb0365cf0c 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -22,7 +22,6 @@
 #ifndef WD33C93_H
 #define WD33C93_H
 
-#include <linux/config.h>
 
 #define PROC_INTERFACE  /* add code for /proc/scsi/wd33c93/xxx interface */
 #ifdef PROC_INTERFACE
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index e55f0ee7e7e4..a0b61af48f1c 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1250,7 +1250,7 @@ static int wd7000_init(Adapter * host)
 		return 0;
 
 
-	if (request_irq(host->irq, wd7000_intr, SA_INTERRUPT, "wd7000", host)) {
+	if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
 		printk("wd7000_init: can't get IRQ %d.\n", host->irq);
 		return (0);
 	}
@@ -1391,7 +1391,7 @@ static int wd7000_proc_info(struct Scsi_Host *host, char *buffer, char **start,
  *
  */
 
-static int wd7000_detect(struct scsi_host_template *tpnt)
+static __init int wd7000_detect(struct scsi_host_template *tpnt)
 {
 	short present = 0, biosaddr_ptr, sig_ptr, i, pass;
 	short biosptr[NUM_CONFIGS];
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index a6cfbb3b361c..4b5f908d35c3 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -136,7 +136,7 @@ zalon_probe(struct parisc_device *dev)
 	if (!host)
 		goto fail;
 
-	if (request_irq(dev->irq, ncr53c8xx_intr, SA_SHIRQ, "zalon", host)) {
+	if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
 		printk(KERN_ERR "%s: irq problem with %d, detaching\n ",
 			dev->dev.bus_id, dev->irq);
 		goto fail;