author     Jeff Garzik <jgarzik@pobox.com>    2005-09-08 05:41:28 -0400
committer  Jeff Garzik <jgarzik@pobox.com>    2005-09-08 05:41:28 -0400
commit     142e27fc8a3619471669d6241784eec9167c47d1 (patch)
tree       e88850b63ec910ee28874f93c43fb66421bb8119 /drivers/scsi
parent     a9053d0494d3c92807701c0f47df61d50c971581 (diff)
parent     caf39e87cc1182f7dae84eefc43ca14d54c78ef9 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-xxxx.c | 57
-rw-r--r--  drivers/scsi/Kconfig | 17
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/NCR5380.c | 9
-rw-r--r--  drivers/scsi/NCR53c406a.c | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 327
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 55
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 20
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 4
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 20
-rw-r--r--  drivers/scsi/aacraid/linit.c | 106
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 20
-rw-r--r--  drivers/scsi/aacraid/rx.c | 20
-rw-r--r--  drivers/scsi/aacraid/sa.c | 22
-rw-r--r--  drivers/scsi/advansys.c | 4
-rw-r--r--  drivers/scsi/ahci.c | 54
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic79xx | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7770.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 104
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 4570
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 288
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 82
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 14
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 88
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.reg | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.seq | 5
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 36
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 60
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 97
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 29
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_proc.c | 45
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped | 933
-rw-r--r--  drivers/scsi/aic7xxx/aiclib.c | 1377
-rw-r--r--  drivers/scsi/aic7xxx/aiclib.h | 890
-rw-r--r--  drivers/scsi/ata_piix.c | 11
-rw-r--r--  drivers/scsi/ch.c | 42
-rw-r--r--  drivers/scsi/constants.c | 49
-rw-r--r--  drivers/scsi/hosts.c | 113
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 179
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/libata-core.c | 19
-rw-r--r--  drivers/scsi/libata-scsi.c | 8
-rw-r--r--  drivers/scsi/libata.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mesh.c | 6
-rw-r--r--  drivers/scsi/qla1280.c | 359
-rw-r--r--  drivers/scsi/qla1280.h | 336
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 136
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 157
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 564
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 24
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 97
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 117
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 34
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/raid_class.c | 250
-rw-r--r--  drivers/scsi/sata_mv.c | 843
-rw-r--r--  drivers/scsi/sata_nv.c | 9
-rw-r--r--  drivers/scsi/sata_promise.c | 20
-rw-r--r--  drivers/scsi/sata_qstor.c | 10
-rw-r--r--  drivers/scsi/sata_sil.c | 12
-rw-r--r--  drivers/scsi/sata_svw.c | 7
-rw-r--r--  drivers/scsi/sata_sx4.c | 54
-rw-r--r--  drivers/scsi/sata_uli.c | 4
-rw-r--r--  drivers/scsi/sata_vsc.c | 5
-rw-r--r--  drivers/scsi/scsi.c | 17
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 48
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 64
-rw-r--r--  drivers/scsi/scsi_lib.c | 296
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 128
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 62
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 168
-rw-r--r--  drivers/scsi/sd.c | 185
-rw-r--r--  drivers/scsi/sg.c | 11
-rw-r--r--  drivers/scsi/sr.c | 75
-rw-r--r--  drivers/scsi/sr.h | 1
-rw-r--r--  drivers/scsi/sr_ioctl.c | 62
-rw-r--r--  drivers/scsi/st.c | 156
-rw-r--r--  drivers/scsi/st.h | 3
103 files changed, 6019 insertions, 8229 deletions
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 973c51fb0fe2..ae9e0203e9de 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1499,22 +1499,43 @@ static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
 	return 0;
 } /* End tw_scsiop_inquiry() */
 
+static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
+				 void *data, unsigned int len)
+{
+	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+	void *buf;
+	unsigned int transfer_len;
+
+	if (cmd->use_sg) {
+		struct scatterlist *sg =
+			(struct scatterlist *)cmd->request_buffer;
+		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		transfer_len = min(sg->length, len);
+	} else {
+		buf = cmd->request_buffer;
+		transfer_len = min(cmd->request_bufflen, len);
+	}
+
+	memcpy(buf, data, transfer_len);
+
+	if (cmd->use_sg) {
+		struct scatterlist *sg;
+
+		sg = (struct scatterlist *)cmd->request_buffer;
+		kunmap_atomic(buf - sg->offset, KM_IRQ0);
+	}
+}
+
 /* This function is called by the isr to complete an inquiry command */
 static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id)
 {
 	unsigned char *is_unit_present;
-	unsigned char *request_buffer;
+	unsigned char request_buffer[36];
 	TW_Param *param;
 
 	dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry_complete()\n");
 
-	/* Fill request buffer */
-	if (tw_dev->srb[request_id]->request_buffer == NULL) {
-		printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry_complete(): Request buffer NULL.\n");
-		return 1;
-	}
-	request_buffer = tw_dev->srb[request_id]->request_buffer;
-	memset(request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(request_buffer, 0, sizeof(request_buffer));
 	request_buffer[0] = TYPE_DISK; /* Peripheral device type */
 	request_buffer[1] = 0;	/* Device type modifier */
 	request_buffer[2] = 0;	/* No ansi/iso compliance */
@@ -1522,6 +1543,8 @@ static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_i
 	memcpy(&request_buffer[8], "3ware ", 8);	/* Vendor ID */
 	sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id);
 	memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3);
+	tw_transfer_internal(tw_dev, request_id, request_buffer,
+			     sizeof(request_buffer));
 
 	param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
 	if (param == NULL) {
@@ -1612,7 +1635,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
 {
 	TW_Param *param;
 	unsigned char *flags;
-	unsigned char *request_buffer;
+	unsigned char request_buffer[8];
 
 	dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n");
 
@@ -1622,8 +1645,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
 		return 1;
 	}
 	flags = (char *)&(param->data[0]);
-	request_buffer = tw_dev->srb[request_id]->buffer;
-	memset(request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(request_buffer, 0, sizeof(request_buffer));
 
 	request_buffer[0] = 0xf;	/* mode data length */
 	request_buffer[1] = 0;		/* default medium type */
@@ -1635,6 +1657,8 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
 		request_buffer[6] = 0x4;	/* WCE on */
 	else
 		request_buffer[6] = 0x0;	/* WCE off */
+	tw_transfer_internal(tw_dev, request_id, request_buffer,
+			     sizeof(request_buffer));
 
 	return 0;
 } /* End tw_scsiop_mode_sense_complete() */
@@ -1701,17 +1725,12 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
 {
 	unsigned char *param_data;
 	u32 capacity;
-	char *buff;
+	char buff[8];
 	TW_Param *param;
 
 	dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n");
 
-	buff = tw_dev->srb[request_id]->request_buffer;
-	if (buff == NULL) {
-		printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Request buffer NULL.\n");
-		return 1;
-	}
-	memset(buff, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(buff, 0, sizeof(buff));
 	param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
 	if (param == NULL) {
 		printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n");
@@ -1739,6 +1758,8 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
 	buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff;
 	buff[7] = TW_BLOCK_SIZE & 0xff;
 
+	tw_transfer_internal(tw_dev, request_id, buff, sizeof(buff));
+
 	return 0;
 } /* End tw_scsiop_read_capacity_complete() */
 
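For readers skimming the hunks above: the new tw_transfer_internal() replaces direct writes through scsicmd->request_buffer, which is not a flat kernel pointer when the midlayer hands the driver a scatter-gather list. A minimal sketch of the technique, assuming the 2.6.13-era kmap_atomic(page, KM_IRQ0) interface; copy_to_cmd_buffer is an illustrative name, not a function in the driver:

	static void copy_to_cmd_buffer(struct scsi_cmnd *cmd, void *data,
				       unsigned int len)
	{
		void *buf;
		unsigned int transfer_len;

		if (cmd->use_sg) {
			/* request_buffer is really a scatterlist; map its first
			 * element and bound the copy by that element's length. */
			struct scatterlist *sg =
				(struct scatterlist *)cmd->request_buffer;

			buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
			transfer_len = min(sg->length, len);
			memcpy(buf, data, transfer_len);
			kunmap_atomic(buf - sg->offset, KM_IRQ0);
		} else {
			/* flat buffer: copy no more than the midlayer allocated */
			buf = cmd->request_buffer;
			transfer_len = min(cmd->request_bufflen, len);
			memcpy(buf, data, transfer_len);
		}
	}

The min() bounding is the point of the change: the old code memset/memcpy'd request_bufflen bytes through a pointer that might not even be a buffer.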
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 12c208fb18c5..2d21265e650b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,5 +1,11 @@
 menu "SCSI device support"
 
+config RAID_ATTRS
+	tristate "RAID Transport Class"
+	default n
+	---help---
+	  Provides RAID
+
 config SCSI
 	tristate "SCSI device support"
 	---help---
@@ -250,7 +256,7 @@ config SCSI_DECNCR
 
 config SCSI_DECSII
 	tristate "DEC SII Scsi Driver"
-	depends on MACH_DECSTATION && SCSI && MIPS32
+	depends on MACH_DECSTATION && SCSI && 32BIT
 
 config BLK_DEV_3W_XXXX_RAID
 	tristate "3ware 5/6/7/8xxx ATA-RAID support"
@@ -459,6 +465,15 @@ config SCSI_ATA_PIIX
 
 	  If unsure, say N.
 
+config SCSI_SATA_MV
+	tristate "Marvell SATA support"
+	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Marvell Serial ATA family.
+	  Currently supports 88SX[56]0[48][01] chips.
+
+	  If unsure, say N.
+
 config SCSI_SATA_NV
 	tristate "NVIDIA SATA support"
 	depends on SCSI_SATA && PCI && EXPERIMENTAL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3746fb9fa2f5..4b4fd94c2674 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -22,6 +22,8 @@ subdir-$(CONFIG_PCMCIA) += pcmcia
 
 obj-$(CONFIG_SCSI)		+= scsi_mod.o
 
+obj-$(CONFIG_RAID_ATTRS)	+= raid_class.o
+
 # --- NOTE ORDERING HERE ---
 # For kernel non-modular link, transport attributes need to
 # be initialised before drivers
@@ -132,6 +134,7 @@ obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
 obj-$(CONFIG_SCSI_SATA_SX4)	+= libata.o sata_sx4.o
 obj-$(CONFIG_SCSI_SATA_NV)	+= libata.o sata_nv.o
 obj-$(CONFIG_SCSI_SATA_ULI)	+= libata.o sata_uli.o
+obj-$(CONFIG_SCSI_SATA_MV)	+= libata.o sata_mv.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index f8ec6fe7d858..d40ba0bd68a3 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -88,6 +88,13 @@
  */
 #include <scsi/scsi_dbg.h>
 
+#ifndef NDEBUG
+#define NDEBUG 0
+#endif
+#ifndef NDEBUG
+#define NDEBUG_ABORT 0
+#endif
+
 #if (NDEBUG & NDEBUG_LISTS)
 #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
 #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@@ -359,7 +366,7 @@ static struct {
 	{PHASE_UNKNOWN, "UNKNOWN"}
 };
 
-#ifdef NDEBUG
+#if NDEBUG
 static struct {
 	unsigned char mask;
 	const char *name;
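The two NCR5380 additions illustrate a common preprocessor idiom: give a debug-mask macro a benign default so that arithmetic tests such as #if (NDEBUG & NDEBUG_LISTS) always see a constant expression, and gate the debug-only tables with #if rather than #ifdef so a zero mask compiles them out. A generic sketch of the pattern (the 0x1 category bit is illustrative, not taken from NCR5380.h):

	#define NDEBUG_LISTS 0x1		/* illustrative debug category bit */

	#ifndef NDEBUG
	#define NDEBUG 0			/* default: no debug categories enabled */
	#endif

	#if (NDEBUG & NDEBUG_LISTS)		/* 0 & 0x1 == 0 -> block compiled out */
	#define LIST(x, y) printk("Adding %p to %p\n", (void *)(x), (void *)(y))
	#else
	#define LIST(x, y)			/* expands to nothing in normal builds */
	#endif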
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 79ae73b23680..e1f2246ee7cd 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -62,7 +62,7 @@
 
 #define SYNC_MODE 0 	/* Synchronous transfer mode */
 
-#if DEBUG
+#ifdef DEBUG
 #undef NCR53C406A_DEBUG
 #define NCR53C406A_DEBUG 1
 #endif
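The one-word change above is semantic, not cosmetic: #if DEBUG needs DEBUG to expand to an integer constant, while #ifdef DEBUG only asks whether the macro is defined at all, which is what a bare "#define DEBUG" with no value provides. A small illustration, not taken from the driver:

	#define DEBUG			/* defined, but expands to nothing */

	#ifdef DEBUG			/* true: the macro exists */
	#define NCR53C406A_DEBUG 1
	#endif

	/*
	 * "#if DEBUG" would fail here: after expansion there is no constant
	 * expression left for the preprocessor to evaluate.  With DEBUG left
	 * completely undefined, "#if DEBUG" would instead silently treat it as 0.
	 */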
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ccdf440021fb..a8e3dfcd0dc7 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -133,6 +133,7 @@ struct inquiry_data {
133 133
134static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap); 134static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
135static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg); 135static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
136static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
136static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); 137static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
137#ifdef AAC_DETAILED_STATUS_INFO 138#ifdef AAC_DETAILED_STATUS_INFO
138static char *aac_get_status_string(u32 status); 139static char *aac_get_status_string(u32 status);
@@ -348,6 +349,27 @@ static void aac_io_done(struct scsi_cmnd * scsicmd)
348 spin_unlock_irqrestore(host->host_lock, cpu_flags); 349 spin_unlock_irqrestore(host->host_lock, cpu_flags);
349} 350}
350 351
352static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
353{
354 void *buf;
355 unsigned int transfer_len;
356 struct scatterlist *sg = scsicmd->request_buffer;
357
358 if (scsicmd->use_sg) {
359 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
360 transfer_len = min(sg->length, len + offset);
361 } else {
362 buf = scsicmd->request_buffer;
363 transfer_len = min(scsicmd->request_bufflen, len + offset);
364 }
365
366 memcpy(buf + offset, data, transfer_len - offset);
367
368 if (scsicmd->use_sg)
369 kunmap_atomic(buf - sg->offset, KM_IRQ0);
370
371}
372
351static void get_container_name_callback(void *context, struct fib * fibptr) 373static void get_container_name_callback(void *context, struct fib * fibptr)
352{ 374{
353 struct aac_get_name_resp * get_name_reply; 375 struct aac_get_name_resp * get_name_reply;
@@ -363,18 +385,22 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
363 /* Failure is irrelevant, using default value instead */ 385 /* Failure is irrelevant, using default value instead */
364 if ((le32_to_cpu(get_name_reply->status) == CT_OK) 386 if ((le32_to_cpu(get_name_reply->status) == CT_OK)
365 && (get_name_reply->data[0] != '\0')) { 387 && (get_name_reply->data[0] != '\0')) {
366 int count; 388 char *sp = get_name_reply->data;
367 char * dp;
368 char * sp = get_name_reply->data;
369 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0'; 389 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
370 while (*sp == ' ') 390 while (*sp == ' ')
371 ++sp; 391 ++sp;
372 count = sizeof(((struct inquiry_data *)NULL)->inqd_pid); 392 if (*sp) {
373 dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid; 393 char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
374 if (*sp) do { 394 int count = sizeof(d);
375 *dp++ = (*sp) ? *sp++ : ' '; 395 char *dp = d;
376 } while (--count > 0); 396 do {
397 *dp++ = (*sp) ? *sp++ : ' ';
398 } while (--count > 0);
399 aac_internal_transfer(scsicmd, d,
400 offsetof(struct inquiry_data, inqd_pid), sizeof(d));
401 }
377 } 402 }
403
378 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 404 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
379 405
380 fib_complete(fibptr); 406 fib_complete(fibptr);
@@ -777,34 +803,36 @@ int aac_get_adapter_info(struct aac_dev* dev)
777 /* 803 /*
778 * 57 scatter gather elements 804 * 57 scatter gather elements
779 */ 805 */
780 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 806 if (!(dev->raw_io_interface)) {
781 sizeof(struct aac_fibhdr) - 807 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
782 sizeof(struct aac_write) + sizeof(struct sgmap)) /
783 sizeof(struct sgmap);
784 if (dev->dac_support) {
785 /*
786 * 38 scatter gather elements
787 */
788 dev->scsi_host_ptr->sg_tablesize =
789 (dev->max_fib_size -
790 sizeof(struct aac_fibhdr) - 808 sizeof(struct aac_fibhdr) -
791 sizeof(struct aac_write64) + 809 sizeof(struct aac_write) + sizeof(struct sgmap)) /
792 sizeof(struct sgmap64)) / 810 sizeof(struct sgmap);
793 sizeof(struct sgmap64); 811 if (dev->dac_support) {
794 } 812 /*
795 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 813 * 38 scatter gather elements
796 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 814 */
797 /* 815 dev->scsi_host_ptr->sg_tablesize =
798 * Worst case size that could cause sg overflow when 816 (dev->max_fib_size -
799 * we break up SG elements that are larger than 64KB. 817 sizeof(struct aac_fibhdr) -
800 * Would be nice if we could tell the SCSI layer what 818 sizeof(struct aac_write64) +
801 * the maximum SG element size can be. Worst case is 819 sizeof(struct sgmap64)) /
802 * (sg_tablesize-1) 4KB elements with one 64KB 820 sizeof(struct sgmap64);
803 * element. 821 }
804 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB 822 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
805 */ 823 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
806 dev->scsi_host_ptr->max_sectors = 824 /*
807 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 825 * Worst case size that could cause sg overflow when
826 * we break up SG elements that are larger than 64KB.
827 * Would be nice if we could tell the SCSI layer what
828 * the maximum SG element size can be. Worst case is
829 * (sg_tablesize-1) 4KB elements with one 64KB
830 * element.
831 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
832 */
833 dev->scsi_host_ptr->max_sectors =
834 (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
835 }
808 } 836 }
809 837
810 fib_complete(fibptr); 838 fib_complete(fibptr);
@@ -814,12 +842,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
814} 842}
815 843
816 844
817static void read_callback(void *context, struct fib * fibptr) 845static void io_callback(void *context, struct fib * fibptr)
818{ 846{
819 struct aac_dev *dev; 847 struct aac_dev *dev;
820 struct aac_read_reply *readreply; 848 struct aac_read_reply *readreply;
821 struct scsi_cmnd *scsicmd; 849 struct scsi_cmnd *scsicmd;
822 u32 lba;
823 u32 cid; 850 u32 cid;
824 851
825 scsicmd = (struct scsi_cmnd *) context; 852 scsicmd = (struct scsi_cmnd *) context;
@@ -827,8 +854,7 @@ static void read_callback(void *context, struct fib * fibptr)
827 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 854 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
828 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); 855 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
829 856
830 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 857 dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
831 dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
832 858
833 if (fibptr == NULL) 859 if (fibptr == NULL)
834 BUG(); 860 BUG();
@@ -847,7 +873,7 @@ static void read_callback(void *context, struct fib * fibptr)
847 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 873 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
848 else { 874 else {
849#ifdef AAC_DETAILED_STATUS_INFO 875#ifdef AAC_DETAILED_STATUS_INFO
850 printk(KERN_WARNING "read_callback: io failed, status = %d\n", 876 printk(KERN_WARNING "io_callback: io failed, status = %d\n",
851 le32_to_cpu(readreply->status)); 877 le32_to_cpu(readreply->status));
852#endif 878#endif
853 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 879 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
@@ -867,53 +893,6 @@ static void read_callback(void *context, struct fib * fibptr)
867 aac_io_done(scsicmd); 893 aac_io_done(scsicmd);
868} 894}
869 895
870static void write_callback(void *context, struct fib * fibptr)
871{
872 struct aac_dev *dev;
873 struct aac_write_reply *writereply;
874 struct scsi_cmnd *scsicmd;
875 u32 lba;
876 u32 cid;
877
878 scsicmd = (struct scsi_cmnd *) context;
879 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
880 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
881
882 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
883 dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
884 if (fibptr == NULL)
885 BUG();
886
887 if(scsicmd->use_sg)
888 pci_unmap_sg(dev->pdev,
889 (struct scatterlist *)scsicmd->buffer,
890 scsicmd->use_sg,
891 scsicmd->sc_data_direction);
892 else if(scsicmd->request_bufflen)
893 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
894 scsicmd->request_bufflen,
895 scsicmd->sc_data_direction);
896
897 writereply = (struct aac_write_reply *) fib_data(fibptr);
898 if (le32_to_cpu(writereply->status) == ST_OK)
899 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
900 else {
901 printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
902 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
903 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
904 HARDWARE_ERROR,
905 SENCODE_INTERNAL_TARGET_FAILURE,
906 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
907 0, 0);
908 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
909 sizeof(struct sense_data));
910 }
911
912 fib_complete(fibptr);
913 fib_free(fibptr);
914 aac_io_done(scsicmd);
915}
916
917static int aac_read(struct scsi_cmnd * scsicmd, int cid) 896static int aac_read(struct scsi_cmnd * scsicmd, int cid)
918{ 897{
919 u32 lba; 898 u32 lba;
@@ -954,7 +933,32 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
954 933
955 fib_init(cmd_fibcontext); 934 fib_init(cmd_fibcontext);
956 935
957 if (dev->dac_support == 1) { 936 if (dev->raw_io_interface) {
937 struct aac_raw_io *readcmd;
938 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
939 readcmd->block[0] = cpu_to_le32(lba);
940 readcmd->block[1] = 0;
941 readcmd->count = cpu_to_le32(count<<9);
942 readcmd->cid = cpu_to_le16(cid);
943 readcmd->flags = cpu_to_le16(1);
944 readcmd->bpTotal = 0;
945 readcmd->bpComplete = 0;
946
947 aac_build_sgraw(scsicmd, &readcmd->sg);
948 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
949 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
950 BUG();
951 /*
952 * Now send the Fib to the adapter
953 */
954 status = fib_send(ContainerRawIo,
955 cmd_fibcontext,
956 fibsize,
957 FsaNormal,
958 0, 1,
959 (fib_callback) io_callback,
960 (void *) scsicmd);
961 } else if (dev->dac_support == 1) {
958 struct aac_read64 *readcmd; 962 struct aac_read64 *readcmd;
959 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext); 963 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
960 readcmd->command = cpu_to_le32(VM_CtHostRead64); 964 readcmd->command = cpu_to_le32(VM_CtHostRead64);
@@ -968,7 +972,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
968 fibsize = sizeof(struct aac_read64) + 972 fibsize = sizeof(struct aac_read64) +
969 ((le32_to_cpu(readcmd->sg.count) - 1) * 973 ((le32_to_cpu(readcmd->sg.count) - 1) *
970 sizeof (struct sgentry64)); 974 sizeof (struct sgentry64));
971 BUG_ON (fibsize > (sizeof(struct hw_fib) - 975 BUG_ON (fibsize > (dev->max_fib_size -
972 sizeof(struct aac_fibhdr))); 976 sizeof(struct aac_fibhdr)));
973 /* 977 /*
974 * Now send the Fib to the adapter 978 * Now send the Fib to the adapter
@@ -978,7 +982,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
978 fibsize, 982 fibsize,
979 FsaNormal, 983 FsaNormal,
980 0, 1, 984 0, 1,
981 (fib_callback) read_callback, 985 (fib_callback) io_callback,
982 (void *) scsicmd); 986 (void *) scsicmd);
983 } else { 987 } else {
984 struct aac_read *readcmd; 988 struct aac_read *readcmd;
@@ -1002,7 +1006,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1002 fibsize, 1006 fibsize,
1003 FsaNormal, 1007 FsaNormal,
1004 0, 1, 1008 0, 1,
1005 (fib_callback) read_callback, 1009 (fib_callback) io_callback,
1006 (void *) scsicmd); 1010 (void *) scsicmd);
1007 } 1011 }
1008 1012
@@ -1061,7 +1065,32 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1061 } 1065 }
1062 fib_init(cmd_fibcontext); 1066 fib_init(cmd_fibcontext);
1063 1067
1064 if(dev->dac_support == 1) { 1068 if (dev->raw_io_interface) {
1069 struct aac_raw_io *writecmd;
1070 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1071 writecmd->block[0] = cpu_to_le32(lba);
1072 writecmd->block[1] = 0;
1073 writecmd->count = cpu_to_le32(count<<9);
1074 writecmd->cid = cpu_to_le16(cid);
1075 writecmd->flags = 0;
1076 writecmd->bpTotal = 0;
1077 writecmd->bpComplete = 0;
1078
1079 aac_build_sgraw(scsicmd, &writecmd->sg);
1080 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
1081 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
1082 BUG();
1083 /*
1084 * Now send the Fib to the adapter
1085 */
1086 status = fib_send(ContainerRawIo,
1087 cmd_fibcontext,
1088 fibsize,
1089 FsaNormal,
1090 0, 1,
1091 (fib_callback) io_callback,
1092 (void *) scsicmd);
1093 } else if (dev->dac_support == 1) {
1065 struct aac_write64 *writecmd; 1094 struct aac_write64 *writecmd;
1066 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext); 1095 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
1067 writecmd->command = cpu_to_le32(VM_CtHostWrite64); 1096 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
@@ -1085,7 +1114,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1085 fibsize, 1114 fibsize,
1086 FsaNormal, 1115 FsaNormal,
1087 0, 1, 1116 0, 1,
1088 (fib_callback) write_callback, 1117 (fib_callback) io_callback,
1089 (void *) scsicmd); 1118 (void *) scsicmd);
1090 } else { 1119 } else {
1091 struct aac_write *writecmd; 1120 struct aac_write *writecmd;
@@ -1111,7 +1140,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1111 fibsize, 1140 fibsize,
1112 FsaNormal, 1141 FsaNormal,
1113 0, 1, 1142 0, 1,
1114 (fib_callback) write_callback, 1143 (fib_callback) io_callback,
1115 (void *) scsicmd); 1144 (void *) scsicmd);
1116 } 1145 }
1117 1146
@@ -1340,44 +1369,45 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1340 switch (scsicmd->cmnd[0]) { 1369 switch (scsicmd->cmnd[0]) {
1341 case INQUIRY: 1370 case INQUIRY:
1342 { 1371 {
1343 struct inquiry_data *inq_data_ptr; 1372 struct inquiry_data inq_data;
1344 1373
1345 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id)); 1374 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
1346 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer; 1375 memset(&inq_data, 0, sizeof (struct inquiry_data));
1347 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
1348 1376
1349 inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */ 1377 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
1350 inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */ 1378 inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1351 inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 1379 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1352 inq_data_ptr->inqd_len = 31; 1380 inq_data.inqd_len = 31;
1353 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ 1381 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
1354 inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */ 1382 inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
1355 /* 1383 /*
1356 * Set the Vendor, Product, and Revision Level 1384 * Set the Vendor, Product, and Revision Level
1357 * see: <vendor>.c i.e. aac.c 1385 * see: <vendor>.c i.e. aac.c
1358 */ 1386 */
1359 if (scsicmd->device->id == host->this_id) { 1387 if (scsicmd->device->id == host->this_id) {
1360 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *))); 1388 setinqstr(cardtype, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *)));
1361 inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */ 1389 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
1390 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1362 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1391 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1363 scsicmd->scsi_done(scsicmd); 1392 scsicmd->scsi_done(scsicmd);
1364 return 0; 1393 return 0;
1365 } 1394 }
1366 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type); 1395 setinqstr(cardtype, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1367 inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 1396 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1397 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1368 return aac_get_container_name(scsicmd, cid); 1398 return aac_get_container_name(scsicmd, cid);
1369 } 1399 }
1370 case READ_CAPACITY: 1400 case READ_CAPACITY:
1371 { 1401 {
1372 u32 capacity; 1402 u32 capacity;
1373 char *cp; 1403 char cp[8];
1374 1404
1375 dprintk((KERN_DEBUG "READ CAPACITY command.\n")); 1405 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1376 if (fsa_dev_ptr[cid].size <= 0x100000000LL) 1406 if (fsa_dev_ptr[cid].size <= 0x100000000LL)
1377 capacity = fsa_dev_ptr[cid].size - 1; 1407 capacity = fsa_dev_ptr[cid].size - 1;
1378 else 1408 else
1379 capacity = (u32)-1; 1409 capacity = (u32)-1;
1380 cp = scsicmd->request_buffer; 1410
1381 cp[0] = (capacity >> 24) & 0xff; 1411 cp[0] = (capacity >> 24) & 0xff;
1382 cp[1] = (capacity >> 16) & 0xff; 1412 cp[1] = (capacity >> 16) & 0xff;
1383 cp[2] = (capacity >> 8) & 0xff; 1413 cp[2] = (capacity >> 8) & 0xff;
@@ -1386,6 +1416,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1386 cp[5] = 0; 1416 cp[5] = 0;
1387 cp[6] = 2; 1417 cp[6] = 2;
1388 cp[7] = 0; 1418 cp[7] = 0;
1419 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
1389 1420
1390 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1421 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1391 scsicmd->scsi_done(scsicmd); 1422 scsicmd->scsi_done(scsicmd);
@@ -1395,15 +1426,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1395 1426
1396 case MODE_SENSE: 1427 case MODE_SENSE:
1397 { 1428 {
1398 char *mode_buf; 1429 char mode_buf[4];
1399 1430
1400 dprintk((KERN_DEBUG "MODE SENSE command.\n")); 1431 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1401 mode_buf = scsicmd->request_buffer;
1402 mode_buf[0] = 3; /* Mode data length */ 1432 mode_buf[0] = 3; /* Mode data length */
1403 mode_buf[1] = 0; /* Medium type - default */ 1433 mode_buf[1] = 0; /* Medium type - default */
1404 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ 1434 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1405 mode_buf[3] = 0; /* Block descriptor length */ 1435 mode_buf[3] = 0; /* Block descriptor length */
1406 1436
1437 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
1407 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1438 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1408 scsicmd->scsi_done(scsicmd); 1439 scsicmd->scsi_done(scsicmd);
1409 1440
@@ -1411,10 +1442,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1411 } 1442 }
1412 case MODE_SENSE_10: 1443 case MODE_SENSE_10:
1413 { 1444 {
1414 char *mode_buf; 1445 char mode_buf[8];
1415 1446
1416 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); 1447 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1417 mode_buf = scsicmd->request_buffer;
1418 mode_buf[0] = 0; /* Mode data length (MSB) */ 1448 mode_buf[0] = 0; /* Mode data length (MSB) */
1419 mode_buf[1] = 6; /* Mode data length (LSB) */ 1449 mode_buf[1] = 6; /* Mode data length (LSB) */
1420 mode_buf[2] = 0; /* Medium type - default */ 1450 mode_buf[2] = 0; /* Medium type - default */
@@ -1423,6 +1453,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1423 mode_buf[5] = 0; /* reserved */ 1453 mode_buf[5] = 0; /* reserved */
1424 mode_buf[6] = 0; /* Block descriptor length (MSB) */ 1454 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1425 mode_buf[7] = 0; /* Block descriptor length (LSB) */ 1455 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1456 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
1426 1457
1427 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1458 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1428 scsicmd->scsi_done(scsicmd); 1459 scsicmd->scsi_done(scsicmd);
@@ -1894,7 +1925,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
1894 srbcmd->id = cpu_to_le32(scsicmd->device->id); 1925 srbcmd->id = cpu_to_le32(scsicmd->device->id);
1895 srbcmd->lun = cpu_to_le32(scsicmd->device->lun); 1926 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
1896 srbcmd->flags = cpu_to_le32(flag); 1927 srbcmd->flags = cpu_to_le32(flag);
1897 timeout = (scsicmd->timeout-jiffies)/HZ; 1928 timeout = scsicmd->timeout_per_command/HZ;
1898 if(timeout == 0){ 1929 if(timeout == 0){
1899 timeout = 1; 1930 timeout = 1;
1900 } 1931 }
@@ -2077,6 +2108,76 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
2077 return byte_count; 2108 return byte_count;
2078} 2109}
2079 2110
2111static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
2112{
2113 struct Scsi_Host *host = scsicmd->device->host;
2114 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2115 unsigned long byte_count = 0;
2116
2117 // Get rid of old data
2118 psg->count = 0;
2119 psg->sg[0].next = 0;
2120 psg->sg[0].prev = 0;
2121 psg->sg[0].addr[0] = 0;
2122 psg->sg[0].addr[1] = 0;
2123 psg->sg[0].count = 0;
2124 psg->sg[0].flags = 0;
2125 if (scsicmd->use_sg) {
2126 struct scatterlist *sg;
2127 int i;
2128 int sg_count;
2129 sg = (struct scatterlist *) scsicmd->request_buffer;
2130
2131 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
2132 scsicmd->sc_data_direction);
2133
2134 for (i = 0; i < sg_count; i++) {
2135 int count = sg_dma_len(sg);
2136 u64 addr = sg_dma_address(sg);
2137 psg->sg[i].next = 0;
2138 psg->sg[i].prev = 0;
2139 psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
2140 psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2141 psg->sg[i].count = cpu_to_le32(count);
2142 psg->sg[i].flags = 0;
2143 byte_count += count;
2144 sg++;
2145 }
2146 psg->count = cpu_to_le32(sg_count);
2147 /* hba wants the size to be exact */
2148 if(byte_count > scsicmd->request_bufflen){
2149 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
2150 (byte_count - scsicmd->request_bufflen);
2151 psg->sg[i-1].count = cpu_to_le32(temp);
2152 byte_count = scsicmd->request_bufflen;
2153 }
2154 /* Check for command underflow */
2155 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
2156 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
2157 byte_count, scsicmd->underflow);
2158 }
2159 }
2160 else if(scsicmd->request_bufflen) {
2161 int count;
2162 u64 addr;
2163 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2164 scsicmd->request_buffer,
2165 scsicmd->request_bufflen,
2166 scsicmd->sc_data_direction);
2167 addr = scsicmd->SCp.dma_handle;
2168 count = scsicmd->request_bufflen;
2169 psg->count = cpu_to_le32(1);
2170 psg->sg[0].next = 0;
2171 psg->sg[0].prev = 0;
2172 psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
2173 psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2174 psg->sg[0].count = cpu_to_le32(count);
2175 psg->sg[0].flags = 0;
2176 byte_count = scsicmd->request_bufflen;
2177 }
2178 return byte_count;
2179}
2180
2080#ifdef AAC_DETAILED_STATUS_INFO 2181#ifdef AAC_DETAILED_STATUS_INFO
2081 2182
2082struct aac_srb_status_info { 2183struct aac_srb_status_info {
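aac_build_sgraw() above packs each DMA segment into the firmware's raw SG format, splitting the 64-bit bus address into two little-endian 32-bit words. A condensed sketch of the per-segment step (fill_raw_sge is an illustrative helper, not a function in the driver):

	static void fill_raw_sge(struct sgentryraw *sge, u64 addr, u32 count)
	{
		sge->next    = 0;					/* reserved for F/W */
		sge->prev    = 0;					/* reserved for F/W */
		sge->addr[1] = cpu_to_le32((u32)(addr >> 32));		/* high 32 bits */
		sge->addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));	/* low 32 bits */
		sge->count   = cpu_to_le32(count);
		sge->flags   = 0;					/* reserved for F/W */
	}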
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4ab07861b457..e40528185d48 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -110,6 +110,22 @@ struct user_sgentry64 {
110 u32 count; /* Length. */ 110 u32 count; /* Length. */
111}; 111};
112 112
113struct sgentryraw {
114 __le32 next; /* reserved for F/W use */
115 __le32 prev; /* reserved for F/W use */
116 __le32 addr[2];
117 __le32 count;
118 __le32 flags; /* reserved for F/W use */
119};
120
121struct user_sgentryraw {
122 u32 next; /* reserved for F/W use */
123 u32 prev; /* reserved for F/W use */
124 u32 addr[2];
125 u32 count;
126 u32 flags; /* reserved for F/W use */
127};
128
113/* 129/*
114 * SGMAP 130 * SGMAP
115 * 131 *
@@ -137,6 +153,16 @@ struct user_sgmap64 {
137 struct user_sgentry64 sg[1]; 153 struct user_sgentry64 sg[1];
138}; 154};
139 155
156struct sgmapraw {
157 __le32 count;
158 struct sgentryraw sg[1];
159};
160
161struct user_sgmapraw {
162 u32 count;
163 struct user_sgentryraw sg[1];
164};
165
140struct creation_info 166struct creation_info
141{ 167{
142 u8 buildnum; /* e.g., 588 */ 168 u8 buildnum; /* e.g., 588 */
@@ -351,6 +377,7 @@ struct hw_fib {
351 */ 377 */
352#define ContainerCommand 500 378#define ContainerCommand 500
353#define ContainerCommand64 501 379#define ContainerCommand64 501
380#define ContainerRawIo 502
354/* 381/*
355 * Cluster Commands 382 * Cluster Commands
356 */ 383 */
@@ -456,6 +483,7 @@ struct adapter_ops
456{ 483{
457 void (*adapter_interrupt)(struct aac_dev *dev); 484 void (*adapter_interrupt)(struct aac_dev *dev);
458 void (*adapter_notify)(struct aac_dev *dev, u32 event); 485 void (*adapter_notify)(struct aac_dev *dev, u32 event);
486 void (*adapter_disable_int)(struct aac_dev *dev);
459 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 487 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
460 int (*adapter_check_health)(struct aac_dev *dev); 488 int (*adapter_check_health)(struct aac_dev *dev);
461}; 489};
@@ -981,6 +1009,9 @@ struct aac_dev
981 u8 nondasd_support; 1009 u8 nondasd_support;
982 u8 dac_support; 1010 u8 dac_support;
983 u8 raid_scsi_mode; 1011 u8 raid_scsi_mode;
1012 /* macro side-effects BEWARE */
1013# define raw_io_interface \
1014 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
984 u8 printf_enabled; 1015 u8 printf_enabled;
985}; 1016};
986 1017
@@ -990,6 +1021,9 @@ struct aac_dev
990#define aac_adapter_notify(dev, event) \ 1021#define aac_adapter_notify(dev, event) \
991 (dev)->a_ops.adapter_notify(dev, event) 1022 (dev)->a_ops.adapter_notify(dev, event)
992 1023
1024#define aac_adapter_disable_int(dev) \
1025 (dev)->a_ops.adapter_disable_int(dev)
1026
993#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1027#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
994 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1028 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
995 1029
@@ -1156,6 +1190,17 @@ struct aac_write_reply
1156 __le32 committed; 1190 __le32 committed;
1157}; 1191};
1158 1192
1193struct aac_raw_io
1194{
1195 __le32 block[2];
1196 __le32 count;
1197 __le16 cid;
1198 __le16 flags; /* 00 W, 01 R */
1199 __le16 bpTotal; /* reserved for F/W use */
1200 __le16 bpComplete; /* reserved for F/W use */
1201 struct sgmapraw sg;
1202};
1203
1159#define CT_FLUSH_CACHE 129 1204#define CT_FLUSH_CACHE 129
1160struct aac_synchronize { 1205struct aac_synchronize {
1161 __le32 command; /* VM_ContainerConfig */ 1206 __le32 command; /* VM_ContainerConfig */
@@ -1196,7 +1241,7 @@ struct aac_srb
1196}; 1241};
1197 1242
1198/* 1243/*
1199 * This and assocated data structs are used by the 1244 * This and associated data structs are used by the
1200 * ioctl caller and are in cpu order. 1245 * ioctl caller and are in cpu order.
1201 */ 1246 */
1202struct user_aac_srb 1247struct user_aac_srb
@@ -1508,11 +1553,12 @@ struct fib_ioctl
1508 1553
1509struct revision 1554struct revision
1510{ 1555{
1511 u32 compat; 1556 __le32 compat;
1512 u32 version; 1557 __le32 version;
1513 u32 build; 1558 __le32 build;
1514}; 1559};
1515 1560
1561
1516/* 1562/*
1517 * Ugly - non Linux like ioctl coding for back compat. 1563 * Ugly - non Linux like ioctl coding for back compat.
1518 */ 1564 */
@@ -1733,3 +1779,4 @@ int aac_get_adapter_info(struct aac_dev* dev);
1733int aac_send_shutdown(struct aac_dev *dev); 1779int aac_send_shutdown(struct aac_dev *dev);
1734extern int numacb; 1780extern int numacb;
1735extern int acbsize; 1781extern int acbsize;
1782extern char aac_driver_version[];
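The new adapter_disable_int member follows the existing adapter_ops pattern: each board family fills in its own handler at init time, and common code calls it through the aac_adapter_disable_int() wrapper. A hedged sketch of the wiring (my_board_disable_int and my_board_init are illustrative names; the real assignments live in rkt.c, rx.c and sa.c, whose hunks this page does not show):

	static void my_board_disable_int(struct aac_dev *dev)
	{
		/* board-specific register write that masks the adapter interrupt */
	}

	static int my_board_init(struct aac_dev *dev)
	{
		dev->a_ops.adapter_disable_int = my_board_disable_int;
		/* ... remaining per-board setup ... */
		return 0;
	}

	/* Generic code then stays board-agnostic:
	 *	aac_adapter_disable_int(dev);
	 */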
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 85387099aab2..71f1cad9b5f0 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -287,7 +287,6 @@ return_fib:
287 kfree(fib->hw_fib); 287 kfree(fib->hw_fib);
288 kfree(fib); 288 kfree(fib);
289 status = 0; 289 status = 0;
290 fibctx->jiffies = jiffies/HZ;
291 } else { 290 } else {
292 spin_unlock_irqrestore(&dev->fib_lock, flags); 291 spin_unlock_irqrestore(&dev->fib_lock, flags);
293 if (f.wait) { 292 if (f.wait) {
@@ -302,6 +301,7 @@ return_fib:
302 status = -EAGAIN; 301 status = -EAGAIN;
303 } 302 }
304 } 303 }
304 fibctx->jiffies = jiffies/HZ;
305 return status; 305 return status;
306} 306}
307 307
@@ -405,10 +405,20 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
405static int check_revision(struct aac_dev *dev, void __user *arg) 405static int check_revision(struct aac_dev *dev, void __user *arg)
406{ 406{
407 struct revision response; 407 struct revision response;
408 408 char *driver_version = aac_driver_version;
409 response.compat = 1; 409 u32 version;
410 response.version = le32_to_cpu(dev->adapter_info.kernelrev); 410
411 response.build = le32_to_cpu(dev->adapter_info.kernelbuild); 411 response.compat = cpu_to_le32(1);
412 version = (simple_strtol(driver_version,
413 &driver_version, 10) << 24) | 0x00000400;
414 version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
415 version += simple_strtol(driver_version + 1, NULL, 10);
416 response.version = cpu_to_le32(version);
417# if (defined(AAC_DRIVER_BUILD))
418 response.build = cpu_to_le32(AAC_DRIVER_BUILD);
419# else
420 response.build = cpu_to_le32(9999);
421# endif
412 422
413 if (copy_to_user(arg, &response, sizeof(response))) 423 if (copy_to_user(arg, &response, sizeof(response)))
414 return -EFAULT; 424 return -EFAULT;
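check_revision() now reports the driver's own version string rather than the adapter's kernelrev, packing it into one 32-bit word. A standalone walk-through of that arithmetic for AAC_DRIVER_VERSION "1.1-4" (userspace C, with strtol standing in for the kernel's simple_strtol; the resulting value is worked out by hand, not captured from the driver):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char *v = "1.1-4";
		unsigned int version;

		version  = (strtol(v, &v, 10) << 24) | 0x00000400;	/* "1"  -> 0x01000400 */
		version += strtol(v + 1, &v, 10) << 16;			/* ".1" -> 0x01010400 */
		version += strtol(v + 1, NULL, 10);			/* "-4" -> 0x01010404 */

		printf("packed revision = 0x%08x\n", version);		/* 0x01010404 */
		return 0;
	}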
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 43557bf661f6..75abd0453289 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -44,7 +44,9 @@
44 44
45#include "aacraid.h" 45#include "aacraid.h"
46 46
47struct aac_common aac_config; 47struct aac_common aac_config = {
48 .irq_mod = 1
49};
48 50
49static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) 51static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
50{ 52{
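The aac_config change relies on C designated initializers: every member not named is implicitly zeroed, so only the new interrupt-moderation default needs to be spelled out. A generic sketch (the struct layout is abbreviated, not the full struct aac_common):

	struct aac_common_sketch {
		u32 irq_mod;		/* interrupt moderation on/off */
		u32 peak_fibs;		/* ... other bookkeeping fields ... */
	};

	static struct aac_common_sketch cfg = {
		.irq_mod = 1,		/* named member set; everything else is 0 */
	};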
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5322865942e2..a1d303f03480 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -254,6 +254,7 @@ static void fib_dealloc(struct fib * fibptr)
254static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify) 254static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
255{ 255{
256 struct aac_queue * q; 256 struct aac_queue * q;
257 unsigned long idx;
257 258
258 /* 259 /*
259 * All of the queues wrap when they reach the end, so we check 260 * All of the queues wrap when they reach the end, so we check
@@ -263,10 +264,23 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
263 */ 264 */
264 265
265 q = &dev->queues->queue[qid]; 266 q = &dev->queues->queue[qid];
266 267
267 *index = le32_to_cpu(*(q->headers.producer)); 268 idx = *index = le32_to_cpu(*(q->headers.producer));
268 if ((*index - 2) == le32_to_cpu(*(q->headers.consumer))) 269 /* Interrupt Moderation, only interrupt for first two entries */
270 if (idx != le32_to_cpu(*(q->headers.consumer))) {
271 if (--idx == 0) {
272 if (qid == AdapHighCmdQueue)
273 idx = ADAP_HIGH_CMD_ENTRIES;
274 else if (qid == AdapNormCmdQueue)
275 idx = ADAP_NORM_CMD_ENTRIES;
276 else if (qid == AdapHighRespQueue)
277 idx = ADAP_HIGH_RESP_ENTRIES;
278 else if (qid == AdapNormRespQueue)
279 idx = ADAP_NORM_RESP_ENTRIES;
280 }
281 if (idx != le32_to_cpu(*(q->headers.consumer)))
269 *nonotify = 1; 282 *nonotify = 1;
283 }
270 284
271 if (qid == AdapHighCmdQueue) { 285 if (qid == AdapHighCmdQueue) {
272 if (*index >= ADAP_HIGH_CMD_ENTRIES) 286 if (*index >= ADAP_HIGH_CMD_ENTRIES)
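The aac_get_entry() change implements the interrupt moderation the new comment describes: the adapter is only notified (and so only interrupts back) while the queue holds at most one outstanding entry; beyond that, nonotify suppresses the doorbell. A simplified model of the producer/consumer check, with the wrap convention copied from the hunk above (should_notify is an illustrative name, not a kernel function):

	static int should_notify(unsigned long producer, unsigned long consumer,
				 unsigned long entries)
	{
		unsigned long idx = producer;

		if (idx == consumer)		/* queue was empty: ring the doorbell */
			return 1;
		if (--idx == 0)			/* step back one slot, wrapping */
			idx = entries;
		return idx == consumer;		/* exactly one entry pending: still notify */
	}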
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 562da90480a1..4ff29d7f5825 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -27,8 +27,11 @@
27 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller 27 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
28 */ 28 */
29 29
30#define AAC_DRIVER_VERSION "1.1.2-lk2" 30#define AAC_DRIVER_VERSION "1.1-4"
31#define AAC_DRIVER_BUILD_DATE __DATE__ 31#ifndef AAC_DRIVER_BRANCH
32#define AAC_DRIVER_BRANCH ""
33#endif
34#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
32#define AAC_DRIVERNAME "aacraid" 35#define AAC_DRIVERNAME "aacraid"
33 36
34#include <linux/compat.h> 37#include <linux/compat.h>
@@ -58,16 +61,24 @@
58 61
59#include "aacraid.h" 62#include "aacraid.h"
60 63
64#ifdef AAC_DRIVER_BUILD
65#define _str(x) #x
66#define str(x) _str(x)
67#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
68#else
69#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE
70#endif
61 71
62MODULE_AUTHOR("Red Hat Inc and Adaptec"); 72MODULE_AUTHOR("Red Hat Inc and Adaptec");
63MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " 73MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
64 "Adaptec Advanced Raid Products, " 74 "Adaptec Advanced Raid Products, "
65 "and HP NetRAID-4M SCSI driver"); 75 "and HP NetRAID-4M SCSI driver");
66MODULE_LICENSE("GPL"); 76MODULE_LICENSE("GPL");
67MODULE_VERSION(AAC_DRIVER_VERSION); 77MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
68 78
69static LIST_HEAD(aac_devices); 79static LIST_HEAD(aac_devices);
70static int aac_cfg_major = -1; 80static int aac_cfg_major = -1;
81char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
71 82
72/* 83/*
73 * Because of the way Linux names scsi devices, the order in this table has 84 * Because of the way Linux names scsi devices, the order in this table has
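AAC_DRIVER_FULL_VERSION depends on the classic two-level stringification trick defined in the hunk above: the inner macro quotes its argument literally, so an outer wrapper is needed to expand AAC_DRIVER_BUILD to its numeric value first. A short illustration (the build number 2409 is made up):

	#define AAC_DRIVER_BUILD 2409		/* illustrative value */

	#define _str(x) #x			/* quotes the tokens as-is */
	#define str(x)  _str(x)			/* expands x, then quotes the result */

	/*
	 *  str(AAC_DRIVER_BUILD)  ->  _str(2409)  ->  "2409"
	 * _str(AAC_DRIVER_BUILD)  ->  "AAC_DRIVER_BUILD"
	 */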
@@ -109,36 +120,39 @@ static struct pci_device_id aac_pci_tbl[] = {
109 { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */ 120 { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */
110 { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */ 121 { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
111 { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */ 122 { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
112 { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 32 }, /* Themisto Jupiter Platform */ 123 { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
113 { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 32 }, /* Themisto Jupiter Platform */ 124 { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
114 { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 33 }, /* Callisto Jupiter Platform */ 125 { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
115 { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 34 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ 126 { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
116 { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 35 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ 127 { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
117 { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 36 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ 128 { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
118 { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 37 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ 129 { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
119 { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 38 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ 130 { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
120 { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 39 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ 131 { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
121 { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 40 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ 132 { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
122 { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 41 }, /* AAR-2610SA PCI SATA 6ch */ 133 { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
123 { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 42 }, /* ASR-2240S (SabreExpress) */ 134 { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
124 { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 43 }, /* ASR-4005SAS */ 135 { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
125 { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 44 }, /* IBM 8i (AvonPark) */ 136 { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005SAS */
126 { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 44 }, /* IBM 8i (AvonPark Lite) */ 137 { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
127 { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 45 }, /* ASR-4000SAS (BlackBird) */ 138 { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
128 { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 46 }, /* ASR-4800SAS (Marauder-X) */ 139 { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
129 { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 47 }, /* ASR-4805SAS (Marauder-E) */ 140 { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
130 { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 48 }, /* ASR-4810SAS (Hurricane */ 141 { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000SAS (BlackBird) */
131 142 { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
132 { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 49 }, /* Perc 320/DC*/ 143 { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
133 { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 50 }, /* Adaptec 5400S (Mustang)*/ 144 { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-4810SAS (Hurricane */
134 { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 51 }, /* Adaptec 5400S (Mustang)*/ 145
135 { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 52 }, /* Dell PERC2/QC */ 146 { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
136 { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 53 }, /* HP NetRAID-4M */ 147 { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
137 148 { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
138 { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 54 }, /* Dell Catchall */ 149 { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
139 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 55 }, /* Legend Catchall */ 150 { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
140 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 56 }, /* Adaptec Catch All */ 151
141 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 57 }, /* Adaptec Rocket Catch All */ 152 { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
153 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
154 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
155 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
142 { 0,} 156 { 0,}
143}; 157};
144MODULE_DEVICE_TABLE(pci, aac_pci_tbl); 158MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -180,8 +194,9 @@ static struct aac_driver_ident aac_drivers[] = {
180 { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */ 194 { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
181 { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */ 195 { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
182 { aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */ 196 { aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */
183 { aac_rkt_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */ 197 { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
184 { aac_rkt_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */ 198 { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
199 { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
185 { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */ 200 { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
186 { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */ 201 { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
187 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ 202 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
@@ -195,10 +210,12 @@ static struct aac_driver_ident aac_drivers[] = {
195 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */ 210 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
196 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */ 211 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */
197 { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */ 212 { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
213 { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
214 { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
198 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */ 215 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
199 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */ 216 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
200 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */ 217 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
201 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */ 218 { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */
202 219
203 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/ 220 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
204 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ 221 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
@@ -839,11 +856,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
839 856
840 return 0; 857 return 0;
841 858
842out_deinit: 859 out_deinit:
843 kill_proc(aac->thread_pid, SIGKILL, 0); 860 kill_proc(aac->thread_pid, SIGKILL, 0);
844 wait_for_completion(&aac->aif_completion); 861 wait_for_completion(&aac->aif_completion);
845 862
846 aac_send_shutdown(aac); 863 aac_send_shutdown(aac);
864 aac_adapter_disable_int(aac);
847 fib_map_free(aac); 865 fib_map_free(aac);
848 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 866 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
849 kfree(aac->queues); 867 kfree(aac->queues);
@@ -860,6 +878,13 @@ out_deinit:
860 return error; 878 return error;
861} 879}
862 880
881static void aac_shutdown(struct pci_dev *dev)
882{
883 struct Scsi_Host *shost = pci_get_drvdata(dev);
884 struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
885 aac_send_shutdown(aac);
886}
887
863static void __devexit aac_remove_one(struct pci_dev *pdev) 888static void __devexit aac_remove_one(struct pci_dev *pdev)
864{ 889{
865 struct Scsi_Host *shost = pci_get_drvdata(pdev); 890 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -871,6 +896,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
871 wait_for_completion(&aac->aif_completion); 896 wait_for_completion(&aac->aif_completion);
872 897
873 aac_send_shutdown(aac); 898 aac_send_shutdown(aac);
899 aac_adapter_disable_int(aac);
874 fib_map_free(aac); 900 fib_map_free(aac);
875 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 901 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
876 aac->comm_phys); 902 aac->comm_phys);
@@ -891,14 +917,15 @@ static struct pci_driver aac_pci_driver = {
891 .id_table = aac_pci_tbl, 917 .id_table = aac_pci_tbl,
892 .probe = aac_probe_one, 918 .probe = aac_probe_one,
893 .remove = __devexit_p(aac_remove_one), 919 .remove = __devexit_p(aac_remove_one),
920 .shutdown = aac_shutdown,
894}; 921};
895 922
896static int __init aac_init(void) 923static int __init aac_init(void)
897{ 924{
898 int error; 925 int error;
899 926
900 printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", 927 printk(KERN_INFO "Adaptec %s driver (%s)\n",
901 AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE); 928 AAC_DRIVERNAME, aac_driver_version);
902 929
903 error = pci_module_init(&aac_pci_driver); 930 error = pci_module_init(&aac_pci_driver);
904 if (error) 931 if (error)
@@ -909,6 +936,7 @@ static int __init aac_init(void)
909 printk(KERN_WARNING 936 printk(KERN_WARNING
910 "aacraid: unable to register \"aac\" device.\n"); 937 "aacraid: unable to register \"aac\" device.\n");
911 } 938 }
939
912 return 0; 940 return 0;
913} 941}
914 942
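
The linit.c hunks above add a PCI .shutdown hook (plus aac_adapter_disable_int on the teardown paths) so the controller receives a firmware shutdown when the system reboots. A minimal sketch of that .shutdown wiring, using invented example_* names rather than the aacraid functions:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* normal device setup would go here */
        return 0;
}

static void example_remove(struct pci_dev *pdev)
{
        /* full teardown on driver unbind: stop the device, free resources */
}

static void example_shutdown(struct pci_dev *pdev)
{
        /* reboot/halt path: only quiesce DMA and interrupts, no teardown */
}

static struct pci_driver example_pci_driver = {
        .name     = "example",
        .probe    = example_probe,
        .remove   = example_remove,
        .shutdown = example_shutdown,   /* called by the PCI core at shutdown */
};
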
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 7d68b7825137..557287a0b80b 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -88,6 +88,16 @@ static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
88} 88}
89 89
90/** 90/**
91 * aac_rkt_disable_interrupt - Disable interrupts
92 * @dev: Adapter
93 */
94
95static void aac_rkt_disable_interrupt(struct aac_dev *dev)
96{
97 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
98}
99
100/**
91 * rkt_sync_cmd - send a command and wait 101 * rkt_sync_cmd - send a command and wait
92 * @dev: Adapter 102 * @dev: Adapter
93 * @command: Command to execute 103 * @command: Command to execute
@@ -412,10 +422,19 @@ int aac_rkt_init(struct aac_dev *dev)
412 * Fill in the function dispatch table. 422 * Fill in the function dispatch table.
413 */ 423 */
414 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter; 424 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
425 dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
415 dev->a_ops.adapter_notify = aac_rkt_notify_adapter; 426 dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
416 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd; 427 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
417 dev->a_ops.adapter_check_health = aac_rkt_check_health; 428 dev->a_ops.adapter_check_health = aac_rkt_check_health;
418 429
430 /*
431 * First clear out all interrupts. Then enable the one's that we
431 * First clear out all interrupts. Then enable the ones that we
432 * can handle.
433 */
434 rkt_writeb(dev, MUnit.OIMR, 0xff);
435 rkt_writel(dev, MUnit.ODR, 0xffffffff);
436 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
437
419 if (aac_init_adapter(dev) == NULL) 438 if (aac_init_adapter(dev) == NULL)
420 goto error_irq; 439 goto error_irq;
421 /* 440 /*
@@ -438,6 +457,7 @@ error_kfree:
438 kfree(dev->queues); 457 kfree(dev->queues);
439 458
440error_irq: 459error_irq:
460 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
441 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 461 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
442 462
443error_iounmap: 463error_iounmap:
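
The rkt.c change registers adapter_disable_int and, during init, masks everything, acknowledges any stale doorbell state, then unmasks only the sources the driver services; the same mask-all write is reused on the error path. A hedged sketch of that mask/clear/unmask sequence with made-up register offsets (the real code uses rkt_writeb/rkt_writel on the MUnit registers):

#include <asm/io.h>

#define EX_OIMR 0x34    /* hypothetical outbound interrupt mask register */
#define EX_ODR  0x2c    /* hypothetical outbound doorbell register */

static void example_init_interrupts(void __iomem *regs)
{
        writeb(0xff, regs + EX_OIMR);           /* mask every source */
        writel(0xffffffff, regs + EX_ODR);      /* clear anything already pending */
        writeb(0xfb, regs + EX_OIMR);           /* unmask only the doorbell bit we handle */
}

static void example_disable_interrupts(void __iomem *regs)
{
        writeb(0xff, regs + EX_OIMR);           /* mask-all, as on the error/shutdown paths */
}
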
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 1ff25f49fada..a8459faf87ca 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -88,6 +88,16 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
88} 88}
89 89
90/** 90/**
91 * aac_rx_disable_interrupt - Disable interrupts
92 * @dev: Adapter
93 */
94
95static void aac_rx_disable_interrupt(struct aac_dev *dev)
96{
97 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
98}
99
100/**
91 * rx_sync_cmd - send a command and wait 101 * rx_sync_cmd - send a command and wait
92 * @dev: Adapter 102 * @dev: Adapter
93 * @command: Command to execute 103 * @command: Command to execute
@@ -412,10 +422,19 @@ int aac_rx_init(struct aac_dev *dev)
412 * Fill in the function dispatch table. 422 * Fill in the function dispatch table.
413 */ 423 */
414 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; 424 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
425 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
415 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 426 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
416 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 427 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
417 dev->a_ops.adapter_check_health = aac_rx_check_health; 428 dev->a_ops.adapter_check_health = aac_rx_check_health;
418 429
430 /*
431 * First clear out all interrupts. Then enable the one's that we
431 * First clear out all interrupts. Then enable the ones that we
432 * can handle.
433 */
434 rx_writeb(dev, MUnit.OIMR, 0xff);
435 rx_writel(dev, MUnit.ODR, 0xffffffff);
436 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
437
419 if (aac_init_adapter(dev) == NULL) 438 if (aac_init_adapter(dev) == NULL)
420 goto error_irq; 439 goto error_irq;
421 /* 440 /*
@@ -438,6 +457,7 @@ error_kfree:
438 kfree(dev->queues); 457 kfree(dev->queues);
439 458
440error_irq: 459error_irq:
460 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
441 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 461 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
442 462
443error_iounmap: 463error_iounmap:
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 0680249ab861..3900abc5850d 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -82,6 +82,16 @@ static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
82} 82}
83 83
84/** 84/**
85 * aac_sa_disable_interrupt - disable interrupt
86 * @dev: Which adapter to enable.
86 * @dev: Which adapter to disable.
87 */
88
89static void aac_sa_disable_interrupt (struct aac_dev *dev)
90{
91 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
92}
93
94/**
85 * aac_sa_notify_adapter - handle adapter notification 95 * aac_sa_notify_adapter - handle adapter notification
86 * @dev: Adapter that notification is for 96 * @dev: Adapter that notification is for
87 * @event: Event to notidy 97 * @event: Event to notidy
87 * @event: Event to notify 97
@@ -214,9 +224,8 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
214 224
215static void aac_sa_interrupt_adapter (struct aac_dev *dev) 225static void aac_sa_interrupt_adapter (struct aac_dev *dev)
216{ 226{
217 u32 ret;
218 sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, 227 sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
219 &ret, NULL, NULL, NULL, NULL); 228 NULL, NULL, NULL, NULL, NULL);
220} 229}
221 230
222/** 231/**
@@ -352,10 +361,18 @@ int aac_sa_init(struct aac_dev *dev)
352 */ 361 */
353 362
354 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; 363 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
364 dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
355 dev->a_ops.adapter_notify = aac_sa_notify_adapter; 365 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
356 dev->a_ops.adapter_sync_cmd = sa_sync_cmd; 366 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
357 dev->a_ops.adapter_check_health = aac_sa_check_health; 367 dev->a_ops.adapter_check_health = aac_sa_check_health;
358 368
369 /*
370 * First clear out all interrupts. Then enable the one's that
370 * First clear out all interrupts. Then enable the ones that
371 * we can handle.
372 */
373 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
374 sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
375 DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
359 376
360 if(aac_init_adapter(dev) == NULL) 377 if(aac_init_adapter(dev) == NULL)
361 goto error_irq; 378 goto error_irq;
@@ -381,6 +398,7 @@ error_kfree:
381 kfree(dev->queues); 398 kfree(dev->queues);
382 399
383error_irq: 400error_irq:
401 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
384 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 402 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
385 403
386error_iounmap: 404error_iounmap:
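
All three backends (rkt, rx, sa) publish the new hook through the per-adapter function dispatch table, so common code such as the probe error path and aac_remove_one can call aac_adapter_disable_int() without knowing the register layout. A rough sketch of that ops-table idea, with hypothetical types standing in for aacraid's dev->a_ops:

#include <linux/types.h>

struct example_adapter;

struct example_adapter_ops {
        void (*adapter_interrupt)(struct example_adapter *dev);
        void (*adapter_disable_int)(struct example_adapter *dev);
        void (*adapter_notify)(struct example_adapter *dev, u32 event);
};

struct example_adapter {
        struct example_adapter_ops a_ops;       /* filled in by the backend init */
};

/* common code stays backend-agnostic, like aac_adapter_disable_int() */
static inline void example_adapter_disable_int(struct example_adapter *dev)
{
        dev->a_ops.adapter_disable_int(dev);
}
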
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 0fb93363eb22..37ec5411e325 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -9200,8 +9200,8 @@ asc_prt_scsi_cmnd(struct scsi_cmnd *s)
9200 (unsigned) s->serial_number, s->retries, s->allowed); 9200 (unsigned) s->serial_number, s->retries, s->allowed);
9201 9201
9202 printk( 9202 printk(
9203" timeout_per_command %d, timeout_total %d, timeout %d\n", 9203" timeout_per_command %d\n",
9204 s->timeout_per_command, s->timeout_total, s->timeout); 9204 s->timeout_per_command);
9205 9205
9206 printk( 9206 printk(
9207" scsi_done 0x%lx, done 0x%lx, host_scribble 0x%lx, result 0x%x\n", 9207" scsi_done 0x%lx, done 0x%lx, host_scribble 0x%lx, result 0x%x\n",
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 179c95c878ac..320df6cd3def 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -189,7 +189,6 @@ static void ahci_irq_clear(struct ata_port *ap);
189static void ahci_eng_timeout(struct ata_port *ap); 189static void ahci_eng_timeout(struct ata_port *ap);
190static int ahci_port_start(struct ata_port *ap); 190static int ahci_port_start(struct ata_port *ap);
191static void ahci_port_stop(struct ata_port *ap); 191static void ahci_port_stop(struct ata_port *ap);
192static void ahci_host_stop(struct ata_host_set *host_set);
193static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 192static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
194static void ahci_qc_prep(struct ata_queued_cmd *qc); 193static void ahci_qc_prep(struct ata_queued_cmd *qc);
195static u8 ahci_check_status(struct ata_port *ap); 194static u8 ahci_check_status(struct ata_port *ap);
@@ -242,7 +241,6 @@ static struct ata_port_operations ahci_ops = {
242 241
243 .port_start = ahci_port_start, 242 .port_start = ahci_port_start,
244 .port_stop = ahci_port_stop, 243 .port_stop = ahci_port_stop,
245 .host_stop = ahci_host_stop,
246}; 244};
247 245
248static struct ata_port_info ahci_port_info[] = { 246static struct ata_port_info ahci_port_info[] = {
@@ -252,7 +250,7 @@ static struct ata_port_info ahci_port_info[] = {
252 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 250 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
253 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 251 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
254 ATA_FLAG_PIO_DMA, 252 ATA_FLAG_PIO_DMA,
255 .pio_mask = 0x03, /* pio3-4 */ 253 .pio_mask = 0x1f, /* pio0-4 */
256 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 254 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
257 .port_ops = &ahci_ops, 255 .port_ops = &ahci_ops,
258 }, 256 },
@@ -296,17 +294,9 @@ static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int
296 return base + 0x100 + (port * 0x80); 294 return base + 0x100 + (port * 0x80);
297} 295}
298 296
299static inline void *ahci_port_base (void *base, unsigned int port) 297static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
300{ 298{
301 return (void *) ahci_port_base_ul((unsigned long)base, port); 299 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
302}
303
304static void ahci_host_stop(struct ata_host_set *host_set)
305{
306 struct ahci_host_priv *hpriv = host_set->private_data;
307 kfree(hpriv);
308
309 ata_host_stop(host_set);
310} 300}
311 301
312static int ahci_port_start(struct ata_port *ap) 302static int ahci_port_start(struct ata_port *ap)
@@ -314,8 +304,9 @@ static int ahci_port_start(struct ata_port *ap)
314 struct device *dev = ap->host_set->dev; 304 struct device *dev = ap->host_set->dev;
315 struct ahci_host_priv *hpriv = ap->host_set->private_data; 305 struct ahci_host_priv *hpriv = ap->host_set->private_data;
316 struct ahci_port_priv *pp; 306 struct ahci_port_priv *pp;
317 void *mem, *mmio = ap->host_set->mmio_base; 307 void __iomem *mmio = ap->host_set->mmio_base;
318 void *port_mmio = ahci_port_base(mmio, ap->port_no); 308 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
309 void *mem;
319 dma_addr_t mem_dma; 310 dma_addr_t mem_dma;
320 311
321 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 312 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
@@ -383,8 +374,8 @@ static void ahci_port_stop(struct ata_port *ap)
383{ 374{
384 struct device *dev = ap->host_set->dev; 375 struct device *dev = ap->host_set->dev;
385 struct ahci_port_priv *pp = ap->private_data; 376 struct ahci_port_priv *pp = ap->private_data;
386 void *mmio = ap->host_set->mmio_base; 377 void __iomem *mmio = ap->host_set->mmio_base;
387 void *port_mmio = ahci_port_base(mmio, ap->port_no); 378 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
388 u32 tmp; 379 u32 tmp;
389 380
390 tmp = readl(port_mmio + PORT_CMD); 381 tmp = readl(port_mmio + PORT_CMD);
@@ -546,8 +537,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
546 537
547static void ahci_intr_error(struct ata_port *ap, u32 irq_stat) 538static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
548{ 539{
549 void *mmio = ap->host_set->mmio_base; 540 void __iomem *mmio = ap->host_set->mmio_base;
550 void *port_mmio = ahci_port_base(mmio, ap->port_no); 541 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
551 u32 tmp; 542 u32 tmp;
552 int work; 543 int work;
553 544
@@ -595,8 +586,8 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
595static void ahci_eng_timeout(struct ata_port *ap) 586static void ahci_eng_timeout(struct ata_port *ap)
596{ 587{
597 struct ata_host_set *host_set = ap->host_set; 588 struct ata_host_set *host_set = ap->host_set;
598 void *mmio = host_set->mmio_base; 589 void __iomem *mmio = host_set->mmio_base;
599 void *port_mmio = ahci_port_base(mmio, ap->port_no); 590 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
600 struct ata_queued_cmd *qc; 591 struct ata_queued_cmd *qc;
601 unsigned long flags; 592 unsigned long flags;
602 593
@@ -626,8 +617,8 @@ static void ahci_eng_timeout(struct ata_port *ap)
626 617
627static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 618static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
628{ 619{
629 void *mmio = ap->host_set->mmio_base; 620 void __iomem *mmio = ap->host_set->mmio_base;
630 void *port_mmio = ahci_port_base(mmio, ap->port_no); 621 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
631 u32 status, serr, ci; 622 u32 status, serr, ci;
632 623
633 serr = readl(port_mmio + PORT_SCR_ERR); 624 serr = readl(port_mmio + PORT_SCR_ERR);
@@ -663,7 +654,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
663 struct ata_host_set *host_set = dev_instance; 654 struct ata_host_set *host_set = dev_instance;
664 struct ahci_host_priv *hpriv; 655 struct ahci_host_priv *hpriv;
665 unsigned int i, handled = 0; 656 unsigned int i, handled = 0;
666 void *mmio; 657 void __iomem *mmio;
667 u32 irq_stat, irq_ack = 0; 658 u32 irq_stat, irq_ack = 0;
668 659
669 VPRINTK("ENTER\n"); 660 VPRINTK("ENTER\n");
@@ -709,7 +700,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
709static int ahci_qc_issue(struct ata_queued_cmd *qc) 700static int ahci_qc_issue(struct ata_queued_cmd *qc)
710{ 701{
711 struct ata_port *ap = qc->ap; 702 struct ata_port *ap = qc->ap;
712 void *port_mmio = (void *) ap->ioaddr.cmd_addr; 703 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
713 704
714 writel(1, port_mmio + PORT_CMD_ISSUE); 705 writel(1, port_mmio + PORT_CMD_ISSUE);
715 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 706 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
@@ -894,7 +885,7 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
894{ 885{
895 struct ahci_host_priv *hpriv = probe_ent->private_data; 886 struct ahci_host_priv *hpriv = probe_ent->private_data;
896 struct pci_dev *pdev = to_pci_dev(probe_ent->dev); 887 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
897 void *mmio = probe_ent->mmio_base; 888 void __iomem *mmio = probe_ent->mmio_base;
898 u32 vers, cap, impl, speed; 889 u32 vers, cap, impl, speed;
899 const char *speed_s; 890 const char *speed_s;
900 u16 cc; 891 u16 cc;
@@ -967,7 +958,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
967 struct ata_probe_ent *probe_ent = NULL; 958 struct ata_probe_ent *probe_ent = NULL;
968 struct ahci_host_priv *hpriv; 959 struct ahci_host_priv *hpriv;
969 unsigned long base; 960 unsigned long base;
970 void *mmio_base; 961 void __iomem *mmio_base;
971 unsigned int board_idx = (unsigned int) ent->driver_data; 962 unsigned int board_idx = (unsigned int) ent->driver_data;
972 int have_msi, pci_dev_busy = 0; 963 int have_msi, pci_dev_busy = 0;
973 int rc; 964 int rc;
@@ -1004,8 +995,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1004 probe_ent->dev = pci_dev_to_dev(pdev); 995 probe_ent->dev = pci_dev_to_dev(pdev);
1005 INIT_LIST_HEAD(&probe_ent->node); 996 INIT_LIST_HEAD(&probe_ent->node);
1006 997
1007 mmio_base = ioremap(pci_resource_start(pdev, AHCI_PCI_BAR), 998 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1008 pci_resource_len(pdev, AHCI_PCI_BAR));
1009 if (mmio_base == NULL) { 999 if (mmio_base == NULL) {
1010 rc = -ENOMEM; 1000 rc = -ENOMEM;
1011 goto err_out_free_ent; 1001 goto err_out_free_ent;
@@ -1049,7 +1039,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1049err_out_hpriv: 1039err_out_hpriv:
1050 kfree(hpriv); 1040 kfree(hpriv);
1051err_out_iounmap: 1041err_out_iounmap:
1052 iounmap(mmio_base); 1042 pci_iounmap(pdev, mmio_base);
1053err_out_free_ent: 1043err_out_free_ent:
1054 kfree(probe_ent); 1044 kfree(probe_ent);
1055err_out_msi: 1045err_out_msi:
@@ -1089,7 +1079,8 @@ static void ahci_remove_one (struct pci_dev *pdev)
1089 scsi_host_put(ap->host); 1079 scsi_host_put(ap->host);
1090 } 1080 }
1091 1081
1092 host_set->ops->host_stop(host_set); 1082 kfree(hpriv);
1083 pci_iounmap(pdev, host_set->mmio_base);
1093 kfree(host_set); 1084 kfree(host_set);
1094 1085
1095 if (have_msi) 1086 if (have_msi)
@@ -1106,7 +1097,6 @@ static int __init ahci_init(void)
1106 return pci_module_init(&ahci_pci_driver); 1097 return pci_module_init(&ahci_pci_driver);
1107} 1098}
1108 1099
1109
1110static void __exit ahci_exit(void) 1100static void __exit ahci_exit(void)
1111{ 1101{
1112 pci_unregister_driver(&ahci_pci_driver); 1102 pci_unregister_driver(&ahci_pci_driver);
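
The ahci.c hunks annotate MMIO pointers as void __iomem * and switch from ioremap()/iounmap() to pci_iomap()/pci_iounmap(), which lets sparse flag direct dereferences of device memory and simplifies BAR handling. A small sketch of that mapping pattern (the BAR number and register offset are invented):

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>

#define EX_BAR          5
#define EX_CTL_REG      0x04

static int example_map_and_touch(struct pci_dev *pdev)
{
        void __iomem *mmio;
        u32 ctl;

        mmio = pci_iomap(pdev, EX_BAR, 0);      /* 0 = map the whole BAR */
        if (!mmio)
                return -ENOMEM;

        ctl = readl(mmio + EX_CTL_REG);         /* always readl/writel, never *mmio */
        writel(ctl, mmio + EX_CTL_REG);

        pci_iounmap(pdev, mmio);
        return 0;
}
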
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index c2523a30a7f5..69ed77fcb71f 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -5,6 +5,7 @@
5config SCSI_AIC79XX 5config SCSI_AIC79XX
6 tristate "Adaptec AIC79xx U320 support" 6 tristate "Adaptec AIC79xx U320 support"
7 depends on PCI && SCSI 7 depends on PCI && SCSI
8 select SCSI_SPI_ATTRS
8 help 9 help
9 This driver supports all of Adaptec's Ultra 320 PCI-X 10 This driver supports all of Adaptec's Ultra 320 PCI-X
10 based SCSI controllers. 11 based SCSI controllers.
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 00f3bd1e181e..527efd36f5c1 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -126,7 +126,6 @@ aic7770_find_device(uint32_t id)
126int 126int
127aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) 127aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
128{ 128{
129 u_long l;
130 int error; 129 int error;
131 int have_seeprom; 130 int have_seeprom;
132 u_int hostconf; 131 u_int hostconf;
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index fd4b2f3eb0c2..653fb0b42aea 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -1247,9 +1247,6 @@ struct ahd_softc {
1247 uint16_t user_tagenable;/* Tagged Queuing allowed */ 1247 uint16_t user_tagenable;/* Tagged Queuing allowed */
1248}; 1248};
1249 1249
1250TAILQ_HEAD(ahd_softc_tailq, ahd_softc);
1251extern struct ahd_softc_tailq ahd_tailq;
1252
1253/*************************** IO Cell Configuration ****************************/ 1250/*************************** IO Cell Configuration ****************************/
1254#define AHD_PRECOMP_SLEW_INDEX \ 1251#define AHD_PRECOMP_SLEW_INDEX \
1255 (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0) 1252 (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0)
@@ -1374,8 +1371,6 @@ void ahd_enable_coalescing(struct ahd_softc *ahd,
1374void ahd_pause_and_flushwork(struct ahd_softc *ahd); 1371void ahd_pause_and_flushwork(struct ahd_softc *ahd);
1375int ahd_suspend(struct ahd_softc *ahd); 1372int ahd_suspend(struct ahd_softc *ahd);
1376int ahd_resume(struct ahd_softc *ahd); 1373int ahd_resume(struct ahd_softc *ahd);
1377void ahd_softc_insert(struct ahd_softc *);
1378struct ahd_softc *ahd_find_softc(struct ahd_softc *ahd);
1379void ahd_set_unit(struct ahd_softc *, int); 1374void ahd_set_unit(struct ahd_softc *, int);
1380void ahd_set_name(struct ahd_softc *, char *); 1375void ahd_set_name(struct ahd_softc *, char *);
1381struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx); 1376struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);
@@ -1524,7 +1519,6 @@ void ahd_print_scb(struct scb *scb);
1524void ahd_print_devinfo(struct ahd_softc *ahd, 1519void ahd_print_devinfo(struct ahd_softc *ahd,
1525 struct ahd_devinfo *devinfo); 1520 struct ahd_devinfo *devinfo);
1526void ahd_dump_sglist(struct scb *scb); 1521void ahd_dump_sglist(struct scb *scb);
1527void ahd_dump_all_cards_state(void);
1528void ahd_dump_card_state(struct ahd_softc *ahd); 1522void ahd_dump_card_state(struct ahd_softc *ahd);
1529int ahd_print_register(ahd_reg_parse_entry_t *table, 1523int ahd_print_register(ahd_reg_parse_entry_t *table,
1530 u_int num_entries, 1524 u_int num_entries,
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 137fb1a37dd1..4e8f00df978d 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -52,8 +52,6 @@
52#include <dev/aic7xxx/aicasm/aicasm_insformat.h> 52#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
53#endif 53#endif
54 54
55/******************************** Globals *************************************/
56struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
57 55
58/***************************** Lookup Tables **********************************/ 56/***************************** Lookup Tables **********************************/
59char *ahd_chip_names[] = 57char *ahd_chip_names[] =
@@ -5180,74 +5178,6 @@ ahd_softc_init(struct ahd_softc *ahd)
5180} 5178}
5181 5179
5182void 5180void
5183ahd_softc_insert(struct ahd_softc *ahd)
5184{
5185 struct ahd_softc *list_ahd;
5186
5187#if AHD_PCI_CONFIG > 0
5188 /*
5189 * Second Function PCI devices need to inherit some
5190 * settings from function 0.
5191 */
5192 if ((ahd->features & AHD_MULTI_FUNC) != 0) {
5193 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5194 ahd_dev_softc_t list_pci;
5195 ahd_dev_softc_t pci;
5196
5197 list_pci = list_ahd->dev_softc;
5198 pci = ahd->dev_softc;
5199 if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci)
5200 && ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) {
5201 struct ahd_softc *master;
5202 struct ahd_softc *slave;
5203
5204 if (ahd_get_pci_function(list_pci) == 0) {
5205 master = list_ahd;
5206 slave = ahd;
5207 } else {
5208 master = ahd;
5209 slave = list_ahd;
5210 }
5211 slave->flags &= ~AHD_BIOS_ENABLED;
5212 slave->flags |=
5213 master->flags & AHD_BIOS_ENABLED;
5214 break;
5215 }
5216 }
5217 }
5218#endif
5219
5220 /*
5221 * Insertion sort into our list of softcs.
5222 */
5223 list_ahd = TAILQ_FIRST(&ahd_tailq);
5224 while (list_ahd != NULL
5225 && ahd_softc_comp(ahd, list_ahd) <= 0)
5226 list_ahd = TAILQ_NEXT(list_ahd, links);
5227 if (list_ahd != NULL)
5228 TAILQ_INSERT_BEFORE(list_ahd, ahd, links);
5229 else
5230 TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links);
5231 ahd->init_level++;
5232}
5233
5234/*
5235 * Verify that the passed in softc pointer is for a
5236 * controller that is still configured.
5237 */
5238struct ahd_softc *
5239ahd_find_softc(struct ahd_softc *ahd)
5240{
5241 struct ahd_softc *list_ahd;
5242
5243 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5244 if (list_ahd == ahd)
5245 return (ahd);
5246 }
5247 return (NULL);
5248}
5249
5250void
5251ahd_set_unit(struct ahd_softc *ahd, int unit) 5181ahd_set_unit(struct ahd_softc *ahd, int unit)
5252{ 5182{
5253 ahd->unit = unit; 5183 ahd->unit = unit;
@@ -7902,18 +7832,10 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
7902static void 7832static void
7903ahd_reset_poll(void *arg) 7833ahd_reset_poll(void *arg)
7904{ 7834{
7905 struct ahd_softc *ahd; 7835 struct ahd_softc *ahd = arg;
7906 u_int scsiseq1; 7836 u_int scsiseq1;
7907 u_long l;
7908 u_long s; 7837 u_long s;
7909 7838
7910 ahd_list_lock(&l);
7911 ahd = ahd_find_softc((struct ahd_softc *)arg);
7912 if (ahd == NULL) {
7913 printf("ahd_reset_poll: Instance %p no longer exists\n", arg);
7914 ahd_list_unlock(&l);
7915 return;
7916 }
7917 ahd_lock(ahd, &s); 7839 ahd_lock(ahd, &s);
7918 ahd_pause(ahd); 7840 ahd_pause(ahd);
7919 ahd_update_modes(ahd); 7841 ahd_update_modes(ahd);
@@ -7924,7 +7846,6 @@ ahd_reset_poll(void *arg)
7924 ahd_reset_poll, ahd); 7846 ahd_reset_poll, ahd);
7925 ahd_unpause(ahd); 7847 ahd_unpause(ahd);
7926 ahd_unlock(ahd, &s); 7848 ahd_unlock(ahd, &s);
7927 ahd_list_unlock(&l);
7928 return; 7849 return;
7929 } 7850 }
7930 7851
@@ -7936,25 +7857,16 @@ ahd_reset_poll(void *arg)
7936 ahd->flags &= ~AHD_RESET_POLL_ACTIVE; 7857 ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
7937 ahd_unlock(ahd, &s); 7858 ahd_unlock(ahd, &s);
7938 ahd_release_simq(ahd); 7859 ahd_release_simq(ahd);
7939 ahd_list_unlock(&l);
7940} 7860}
7941 7861
7942/**************************** Statistics Processing ***************************/ 7862/**************************** Statistics Processing ***************************/
7943static void 7863static void
7944ahd_stat_timer(void *arg) 7864ahd_stat_timer(void *arg)
7945{ 7865{
7946 struct ahd_softc *ahd; 7866 struct ahd_softc *ahd = arg;
7947 u_long l;
7948 u_long s; 7867 u_long s;
7949 int enint_coal; 7868 int enint_coal;
7950 7869
7951 ahd_list_lock(&l);
7952 ahd = ahd_find_softc((struct ahd_softc *)arg);
7953 if (ahd == NULL) {
7954 printf("ahd_stat_timer: Instance %p no longer exists\n", arg);
7955 ahd_list_unlock(&l);
7956 return;
7957 }
7958 ahd_lock(ahd, &s); 7870 ahd_lock(ahd, &s);
7959 7871
7960 enint_coal = ahd->hs_mailbox & ENINT_COALESCE; 7872 enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
@@ -7981,7 +7893,6 @@ ahd_stat_timer(void *arg)
7981 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, 7893 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
7982 ahd_stat_timer, ahd); 7894 ahd_stat_timer, ahd);
7983 ahd_unlock(ahd, &s); 7895 ahd_unlock(ahd, &s);
7984 ahd_list_unlock(&l);
7985} 7896}
7986 7897
7987/****************************** Status Processing *****************************/ 7898/****************************** Status Processing *****************************/
@@ -8745,16 +8656,6 @@ sized:
8745 return (last_probe); 8656 return (last_probe);
8746} 8657}
8747 8658
8748void
8749ahd_dump_all_cards_state(void)
8750{
8751 struct ahd_softc *list_ahd;
8752
8753 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
8754 ahd_dump_card_state(list_ahd);
8755 }
8756}
8757
8758int 8659int
8759ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, 8660ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
8760 const char *name, u_int address, u_int value, 8661 const char *name, u_int address, u_int value,
@@ -9039,7 +8940,6 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9039 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); 8940 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
9040 } 8941 }
9041 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 8942 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
9042 ahd_platform_dump_card_state(ahd);
9043 ahd_restore_modes(ahd, saved_modes); 8943 ahd_restore_modes(ahd, saved_modes);
9044 if (paused == 0) 8944 if (paused == 0)
9045 ahd_unpause(ahd); 8945 ahd_unpause(ahd);
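
With the global ahd_tailq list removed, the reset-poll and statistics timers in aic79xx_core.c no longer re-validate their softc through ahd_find_softc(); each callback simply trusts the pointer it was armed with and takes the per-instance lock. A sketch of that shape, assuming hypothetical types and the old unsigned long timer calling convention:

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_softc {
        spinlock_t lock;
        struct timer_list stat_timer;
};

static void example_stat_timer(unsigned long arg)
{
        struct example_softc *sc = (struct example_softc *)arg;
        unsigned long flags;

        spin_lock_irqsave(&sc->lock, flags);
        /* ... periodic bookkeeping on sc ... */
        mod_timer(&sc->stat_timer, jiffies + HZ);       /* re-arm for the next interval */
        spin_unlock_irqrestore(&sc->lock, flags);
}
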
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 329cb2331339..6b6d4e287793 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -46,32 +46,14 @@
46#include "aic79xx_inline.h" 46#include "aic79xx_inline.h"
47#include <scsi/scsicam.h> 47#include <scsi/scsicam.h>
48 48
49/* 49static struct scsi_transport_template *ahd_linux_transport_template = NULL;
50 * Include aiclib.c as part of our
51 * "module dependencies are hard" work around.
52 */
53#include "aiclib.c"
54 50
55#include <linux/init.h> /* __setup */ 51#include <linux/init.h> /* __setup */
56
57#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
58#include "sd.h" /* For geometry detection */
59#endif
60
61#include <linux/mm.h> /* For fetching system memory size */ 52#include <linux/mm.h> /* For fetching system memory size */
53#include <linux/blkdev.h> /* For block_size() */
62#include <linux/delay.h> /* For ssleep/msleep */ 54#include <linux/delay.h> /* For ssleep/msleep */
63 55
64/* 56/*
65 * Lock protecting manipulation of the ahd softc list.
66 */
67spinlock_t ahd_list_spinlock;
68
69#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
70/* For dynamic sglist size calculation. */
71u_int ahd_linux_nseg;
72#endif
73
74/*
75 * Bucket size for counting good commands in between bad ones. 57 * Bucket size for counting good commands in between bad ones.
76 */ 58 */
77#define AHD_LINUX_ERR_THRESH 1000 59#define AHD_LINUX_ERR_THRESH 1000
@@ -188,71 +170,6 @@ static adapter_tag_info_t aic79xx_tag_info[] =
188}; 170};
189 171
190/* 172/*
191 * By default, read streaming is disabled. In theory,
192 * read streaming should enhance performance, but early
193 * U320 drive firmware actually performs slower with
194 * read streaming enabled.
195 */
196#ifdef CONFIG_AIC79XX_ENABLE_RD_STRM
197#define AIC79XX_CONFIGED_RD_STRM 0xFFFF
198#else
199#define AIC79XX_CONFIGED_RD_STRM 0
200#endif
201
202static uint16_t aic79xx_rd_strm_info[] =
203{
204 AIC79XX_CONFIGED_RD_STRM,
205 AIC79XX_CONFIGED_RD_STRM,
206 AIC79XX_CONFIGED_RD_STRM,
207 AIC79XX_CONFIGED_RD_STRM,
208 AIC79XX_CONFIGED_RD_STRM,
209 AIC79XX_CONFIGED_RD_STRM,
210 AIC79XX_CONFIGED_RD_STRM,
211 AIC79XX_CONFIGED_RD_STRM,
212 AIC79XX_CONFIGED_RD_STRM,
213 AIC79XX_CONFIGED_RD_STRM,
214 AIC79XX_CONFIGED_RD_STRM,
215 AIC79XX_CONFIGED_RD_STRM,
216 AIC79XX_CONFIGED_RD_STRM,
217 AIC79XX_CONFIGED_RD_STRM,
218 AIC79XX_CONFIGED_RD_STRM,
219 AIC79XX_CONFIGED_RD_STRM
220};
221
222/*
223 * DV option:
224 *
225 * positive value = DV Enabled
226 * zero = DV Disabled
227 * negative value = DV Default for adapter type/seeprom
228 */
229#ifdef CONFIG_AIC79XX_DV_SETTING
230#define AIC79XX_CONFIGED_DV CONFIG_AIC79XX_DV_SETTING
231#else
232#define AIC79XX_CONFIGED_DV -1
233#endif
234
235static int8_t aic79xx_dv_settings[] =
236{
237 AIC79XX_CONFIGED_DV,
238 AIC79XX_CONFIGED_DV,
239 AIC79XX_CONFIGED_DV,
240 AIC79XX_CONFIGED_DV,
241 AIC79XX_CONFIGED_DV,
242 AIC79XX_CONFIGED_DV,
243 AIC79XX_CONFIGED_DV,
244 AIC79XX_CONFIGED_DV,
245 AIC79XX_CONFIGED_DV,
246 AIC79XX_CONFIGED_DV,
247 AIC79XX_CONFIGED_DV,
248 AIC79XX_CONFIGED_DV,
249 AIC79XX_CONFIGED_DV,
250 AIC79XX_CONFIGED_DV,
251 AIC79XX_CONFIGED_DV,
252 AIC79XX_CONFIGED_DV
253};
254
255/*
256 * The I/O cell on the chip is very configurable in respect to its analog 173 * The I/O cell on the chip is very configurable in respect to its analog
257 * characteristics. Set the defaults here; they can be overriden with 174 * characteristics. Set the defaults here; they can be overriden with
258 * the proper insmod parameters. 175 * the proper insmod parameters.
@@ -375,13 +292,6 @@ static uint32_t aic79xx_pci_parity = ~0;
375uint32_t aic79xx_allow_memio = ~0; 292uint32_t aic79xx_allow_memio = ~0;
376 293
377/* 294/*
378 * aic79xx_detect() has been run, so register all device arrivals
379 * immediately with the system rather than deferring to the sorted
380 * attachment performed by aic79xx_detect().
381 */
382int aic79xx_detect_complete;
383
384/*
385 * So that we can set how long each device is given as a selection timeout. 295 * So that we can set how long each device is given as a selection timeout.
386 * The table of values goes like this: 296 * The table of values goes like this:
387 * 0 - 256ms 297 * 0 - 256ms
@@ -412,7 +322,7 @@ MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
412MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver"); 322MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver");
413MODULE_LICENSE("Dual BSD/GPL"); 323MODULE_LICENSE("Dual BSD/GPL");
414MODULE_VERSION(AIC79XX_DRIVER_VERSION); 324MODULE_VERSION(AIC79XX_DRIVER_VERSION);
415module_param(aic79xx, charp, 0); 325module_param(aic79xx, charp, 0444);
416MODULE_PARM_DESC(aic79xx, 326MODULE_PARM_DESC(aic79xx,
417"period delimited, options string.\n" 327"period delimited, options string.\n"
418" verbose Enable verbose/diagnostic logging\n" 328" verbose Enable verbose/diagnostic logging\n"
@@ -427,8 +337,6 @@ MODULE_PARM_DESC(aic79xx,
427" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" 337" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
428" tag_info:<tag_str> Set per-target tag depth\n" 338" tag_info:<tag_str> Set per-target tag depth\n"
429" global_tag_depth:<int> Global tag depth for all targets on all buses\n" 339" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
430" rd_strm:<rd_strm_masks> Set per-target read streaming setting.\n"
431" dv:<dv_settings> Set per-controller Domain Validation Setting.\n"
432" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" 340" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
433" precomp:<pcomp_list> Set the signal precompensation (0-7).\n" 341" precomp:<pcomp_list> Set the signal precompensation (0-7).\n"
434" amplitude:<int> Set the signal amplitude (0-7).\n" 342" amplitude:<int> Set the signal amplitude (0-7).\n"
@@ -441,249 +349,35 @@ MODULE_PARM_DESC(aic79xx,
441" Shorten the selection timeout to 128ms\n" 349" Shorten the selection timeout to 128ms\n"
442"\n" 350"\n"
443" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" 351" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
444"\n" 352"\n");
445" Sample /etc/modprobe.conf line:\n"
446" Change Read Streaming for Controller's 2 and 3\n"
447"\n"
448" options aic79xx 'aic79xx=rd_strm:{..0xFFF0.0xC0F0}'");
449 353
450static void ahd_linux_handle_scsi_status(struct ahd_softc *, 354static void ahd_linux_handle_scsi_status(struct ahd_softc *,
451 struct ahd_linux_device *, 355 struct scsi_device *,
452 struct scb *); 356 struct scb *);
453static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, 357static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
454 Scsi_Cmnd *cmd); 358 struct scsi_cmnd *cmd);
455static void ahd_linux_filter_inquiry(struct ahd_softc *ahd,
456 struct ahd_devinfo *devinfo);
457static void ahd_linux_dev_timed_unfreeze(u_long arg);
458static void ahd_linux_sem_timeout(u_long arg); 359static void ahd_linux_sem_timeout(u_long arg);
360static int ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
459static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd); 361static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
460static void ahd_linux_size_nseg(void);
461static void ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd);
462static void ahd_linux_start_dv(struct ahd_softc *ahd);
463static void ahd_linux_dv_timeout(struct scsi_cmnd *cmd);
464static int ahd_linux_dv_thread(void *data);
465static void ahd_linux_kill_dv_thread(struct ahd_softc *ahd);
466static void ahd_linux_dv_target(struct ahd_softc *ahd, u_int target);
467static void ahd_linux_dv_transition(struct ahd_softc *ahd,
468 struct scsi_cmnd *cmd,
469 struct ahd_devinfo *devinfo,
470 struct ahd_linux_target *targ);
471static void ahd_linux_dv_fill_cmd(struct ahd_softc *ahd,
472 struct scsi_cmnd *cmd,
473 struct ahd_devinfo *devinfo);
474static void ahd_linux_dv_inq(struct ahd_softc *ahd,
475 struct scsi_cmnd *cmd,
476 struct ahd_devinfo *devinfo,
477 struct ahd_linux_target *targ,
478 u_int request_length);
479static void ahd_linux_dv_tur(struct ahd_softc *ahd,
480 struct scsi_cmnd *cmd,
481 struct ahd_devinfo *devinfo);
482static void ahd_linux_dv_rebd(struct ahd_softc *ahd,
483 struct scsi_cmnd *cmd,
484 struct ahd_devinfo *devinfo,
485 struct ahd_linux_target *targ);
486static void ahd_linux_dv_web(struct ahd_softc *ahd,
487 struct scsi_cmnd *cmd,
488 struct ahd_devinfo *devinfo,
489 struct ahd_linux_target *targ);
490static void ahd_linux_dv_reb(struct ahd_softc *ahd,
491 struct scsi_cmnd *cmd,
492 struct ahd_devinfo *devinfo,
493 struct ahd_linux_target *targ);
494static void ahd_linux_dv_su(struct ahd_softc *ahd,
495 struct scsi_cmnd *cmd,
496 struct ahd_devinfo *devinfo,
497 struct ahd_linux_target *targ);
498static int ahd_linux_fallback(struct ahd_softc *ahd,
499 struct ahd_devinfo *devinfo);
500static __inline int ahd_linux_dv_fallback(struct ahd_softc *ahd,
501 struct ahd_devinfo *devinfo);
502static void ahd_linux_dv_complete(Scsi_Cmnd *cmd);
503static void ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ);
504static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd, 362static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
505 struct ahd_devinfo *devinfo); 363 struct ahd_devinfo *devinfo);
506static u_int ahd_linux_user_dv_setting(struct ahd_softc *ahd); 364static void ahd_linux_device_queue_depth(struct scsi_device *);
507static void ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd); 365static int ahd_linux_run_command(struct ahd_softc*,
508static void ahd_linux_device_queue_depth(struct ahd_softc *ahd, 366 struct ahd_linux_device *,
509 struct ahd_linux_device *dev); 367 struct scsi_cmnd *);
510static struct ahd_linux_target* ahd_linux_alloc_target(struct ahd_softc*,
511 u_int, u_int);
512static void ahd_linux_free_target(struct ahd_softc*,
513 struct ahd_linux_target*);
514static struct ahd_linux_device* ahd_linux_alloc_device(struct ahd_softc*,
515 struct ahd_linux_target*,
516 u_int);
517static void ahd_linux_free_device(struct ahd_softc*,
518 struct ahd_linux_device*);
519static void ahd_linux_run_device_queue(struct ahd_softc*,
520 struct ahd_linux_device*);
521static void ahd_linux_setup_tag_info_global(char *p); 368static void ahd_linux_setup_tag_info_global(char *p);
522static aic_option_callback_t ahd_linux_setup_tag_info; 369static int aic79xx_setup(char *c);
523static aic_option_callback_t ahd_linux_setup_rd_strm_info;
524static aic_option_callback_t ahd_linux_setup_dv;
525static aic_option_callback_t ahd_linux_setup_iocell_info;
526static int ahd_linux_next_unit(void);
527static void ahd_runq_tasklet(unsigned long data);
528static int aic79xx_setup(char *c);
529
530/****************************** Inlines ***************************************/
531static __inline void ahd_schedule_completeq(struct ahd_softc *ahd);
532static __inline void ahd_schedule_runq(struct ahd_softc *ahd);
533static __inline void ahd_setup_runq_tasklet(struct ahd_softc *ahd);
534static __inline void ahd_teardown_runq_tasklet(struct ahd_softc *ahd);
535static __inline struct ahd_linux_device*
536 ahd_linux_get_device(struct ahd_softc *ahd, u_int channel,
537 u_int target, u_int lun, int alloc);
538static struct ahd_cmd *ahd_linux_run_complete_queue(struct ahd_softc *ahd);
539static __inline void ahd_linux_check_device_queue(struct ahd_softc *ahd,
540 struct ahd_linux_device *dev);
541static __inline struct ahd_linux_device *
542 ahd_linux_next_device_to_run(struct ahd_softc *ahd);
543static __inline void ahd_linux_run_device_queues(struct ahd_softc *ahd);
544static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
545
546static __inline void
547ahd_schedule_completeq(struct ahd_softc *ahd)
548{
549 if ((ahd->platform_data->flags & AHD_RUN_CMPLT_Q_TIMER) == 0) {
550 ahd->platform_data->flags |= AHD_RUN_CMPLT_Q_TIMER;
551 ahd->platform_data->completeq_timer.expires = jiffies;
552 add_timer(&ahd->platform_data->completeq_timer);
553 }
554}
555
556/*
557 * Must be called with our lock held.
558 */
559static __inline void
560ahd_schedule_runq(struct ahd_softc *ahd)
561{
562 tasklet_schedule(&ahd->platform_data->runq_tasklet);
563}
564
565static __inline
566void ahd_setup_runq_tasklet(struct ahd_softc *ahd)
567{
568 tasklet_init(&ahd->platform_data->runq_tasklet, ahd_runq_tasklet,
569 (unsigned long)ahd);
570}
571 370
572static __inline void 371static int ahd_linux_unit;
573ahd_teardown_runq_tasklet(struct ahd_softc *ahd)
574{
575 tasklet_kill(&ahd->platform_data->runq_tasklet);
576}
577 372
578static __inline struct ahd_linux_device*
579ahd_linux_get_device(struct ahd_softc *ahd, u_int channel, u_int target,
580 u_int lun, int alloc)
581{
582 struct ahd_linux_target *targ;
583 struct ahd_linux_device *dev;
584 u_int target_offset;
585 373
586 target_offset = target; 374/****************************** Inlines ***************************************/
587 if (channel != 0) 375static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
588 target_offset += 8;
589 targ = ahd->platform_data->targets[target_offset];
590 if (targ == NULL) {
591 if (alloc != 0) {
592 targ = ahd_linux_alloc_target(ahd, channel, target);
593 if (targ == NULL)
594 return (NULL);
595 } else
596 return (NULL);
597 }
598 dev = targ->devices[lun];
599 if (dev == NULL && alloc != 0)
600 dev = ahd_linux_alloc_device(ahd, targ, lun);
601 return (dev);
602}
603
604#define AHD_LINUX_MAX_RETURNED_ERRORS 4
605static struct ahd_cmd *
606ahd_linux_run_complete_queue(struct ahd_softc *ahd)
607{
608 struct ahd_cmd *acmd;
609 u_long done_flags;
610 int with_errors;
611
612 with_errors = 0;
613 ahd_done_lock(ahd, &done_flags);
614 while ((acmd = TAILQ_FIRST(&ahd->platform_data->completeq)) != NULL) {
615 Scsi_Cmnd *cmd;
616
617 if (with_errors > AHD_LINUX_MAX_RETURNED_ERRORS) {
618 /*
619 * Linux uses stack recursion to requeue
620 * commands that need to be retried. Avoid
621 * blowing out the stack by "spoon feeding"
622 * commands that completed with error back
623 * the operating system in case they are going
624 * to be retried. "ick"
625 */
626 ahd_schedule_completeq(ahd);
627 break;
628 }
629 TAILQ_REMOVE(&ahd->platform_data->completeq,
630 acmd, acmd_links.tqe);
631 cmd = &acmd_scsi_cmd(acmd);
632 cmd->host_scribble = NULL;
633 if (ahd_cmd_get_transaction_status(cmd) != DID_OK
634 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
635 with_errors++;
636
637 cmd->scsi_done(cmd);
638 }
639 ahd_done_unlock(ahd, &done_flags);
640 return (acmd);
641}
642
643static __inline void
644ahd_linux_check_device_queue(struct ahd_softc *ahd,
645 struct ahd_linux_device *dev)
646{
647 if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) != 0
648 && dev->active == 0) {
649 dev->flags &= ~AHD_DEV_FREEZE_TIL_EMPTY;
650 dev->qfrozen--;
651 }
652
653 if (TAILQ_FIRST(&dev->busyq) == NULL
654 || dev->openings == 0 || dev->qfrozen != 0)
655 return;
656
657 ahd_linux_run_device_queue(ahd, dev);
658}
659
660static __inline struct ahd_linux_device *
661ahd_linux_next_device_to_run(struct ahd_softc *ahd)
662{
663
664 if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0
665 || (ahd->platform_data->qfrozen != 0
666 && AHD_DV_SIMQ_FROZEN(ahd) == 0))
667 return (NULL);
668 return (TAILQ_FIRST(&ahd->platform_data->device_runq));
669}
670
671static __inline void
672ahd_linux_run_device_queues(struct ahd_softc *ahd)
673{
674 struct ahd_linux_device *dev;
675
676 while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
677 TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
678 dev->flags &= ~AHD_DEV_ON_RUN_LIST;
679 ahd_linux_check_device_queue(ahd, dev);
680 }
681}
682 376
683static __inline void 377static __inline void
684ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 378ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
685{ 379{
686 Scsi_Cmnd *cmd; 380 struct scsi_cmnd *cmd;
687 int direction; 381 int direction;
688 382
689 cmd = scb->io_ctx; 383 cmd = scb->io_ctx;
@@ -705,197 +399,6 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
705#define BUILD_SCSIID(ahd, cmd) \ 399#define BUILD_SCSIID(ahd, cmd) \
706 ((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id) 400 ((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)
707 401
708/************************ Host template entry points *************************/
709static int ahd_linux_detect(Scsi_Host_Template *);
710static const char *ahd_linux_info(struct Scsi_Host *);
711static int ahd_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
712#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
713static int ahd_linux_slave_alloc(Scsi_Device *);
714static int ahd_linux_slave_configure(Scsi_Device *);
715static void ahd_linux_slave_destroy(Scsi_Device *);
716#if defined(__i386__)
717static int ahd_linux_biosparam(struct scsi_device*,
718 struct block_device*, sector_t, int[]);
719#endif
720#else
721static int ahd_linux_release(struct Scsi_Host *);
722static void ahd_linux_select_queue_depth(struct Scsi_Host *host,
723 Scsi_Device *scsi_devs);
724#if defined(__i386__)
725static int ahd_linux_biosparam(Disk *, kdev_t, int[]);
726#endif
727#endif
728static int ahd_linux_bus_reset(Scsi_Cmnd *);
729static int ahd_linux_dev_reset(Scsi_Cmnd *);
730static int ahd_linux_abort(Scsi_Cmnd *);
731
732/*
733 * Calculate a safe value for AHD_NSEG (as expressed through ahd_linux_nseg).
734 *
735 * In pre-2.5.X...
736 * The midlayer allocates an S/G array dynamically when a command is issued
737 * using SCSI malloc. This array, which is in an OS dependent format that
738 * must later be copied to our private S/G list, is sized to house just the
739 * number of segments needed for the current transfer. Since the code that
740 * sizes the SCSI malloc pool does not take into consideration fragmentation
741 * of the pool, executing transactions numbering just a fraction of our
742 * concurrent transaction limit with SG list lengths aproaching AHC_NSEG will
743 * quickly depleat the SCSI malloc pool of usable space. Unfortunately, the
744 * mid-layer does not properly handle this scsi malloc failures for the S/G
745 * array and the result can be a lockup of the I/O subsystem. We try to size
746 * our S/G list so that it satisfies our drivers allocation requirements in
747 * addition to avoiding fragmentation of the SCSI malloc pool.
748 */
749static void
750ahd_linux_size_nseg(void)
751{
752#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
753 u_int cur_size;
754 u_int best_size;
755
756 /*
757 * The SCSI allocator rounds to the nearest 512 bytes
758 * an cannot allocate across a page boundary. Our algorithm
759 * is to start at 1K of scsi malloc space per-command and
760 * loop through all factors of the PAGE_SIZE and pick the best.
761 */
762 best_size = 0;
763 for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
764 u_int nseg;
765
766 nseg = cur_size / sizeof(struct scatterlist);
767 if (nseg < AHD_LINUX_MIN_NSEG)
768 continue;
769
770 if (best_size == 0) {
771 best_size = cur_size;
772 ahd_linux_nseg = nseg;
773 } else {
774 u_int best_rem;
775 u_int cur_rem;
776
777 /*
778 * Compare the traits of the current "best_size"
779 * with the current size to determine if the
780 * current size is a better size.
781 */
782 best_rem = best_size % sizeof(struct scatterlist);
783 cur_rem = cur_size % sizeof(struct scatterlist);
784 if (cur_rem < best_rem) {
785 best_size = cur_size;
786 ahd_linux_nseg = nseg;
787 }
788 }
789 }
790#endif
791}
792
793/*
794 * Try to detect an Adaptec 79XX controller.
795 */
796static int
797ahd_linux_detect(Scsi_Host_Template *template)
798{
799 struct ahd_softc *ahd;
800 int found;
801 int error = 0;
802
803#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
804 /*
805 * It is a bug that the upper layer takes
806 * this lock just prior to calling us.
807 */
808 spin_unlock_irq(&io_request_lock);
809#endif
810
811 /*
812 * Sanity checking of Linux SCSI data structures so
813 * that some of our hacks^H^H^H^H^Hassumptions aren't
814 * violated.
815 */
816 if (offsetof(struct ahd_cmd_internal, end)
817 > offsetof(struct scsi_cmnd, host_scribble)) {
818 printf("ahd_linux_detect: SCSI data structures changed.\n");
819 printf("ahd_linux_detect: Unable to attach\n");
820 return (0);
821 }
822 /*
823 * Determine an appropriate size for our Scatter Gatther lists.
824 */
825 ahd_linux_size_nseg();
826#ifdef MODULE
827 /*
828 * If we've been passed any parameters, process them now.
829 */
830 if (aic79xx)
831 aic79xx_setup(aic79xx);
832#endif
833
834 template->proc_name = "aic79xx";
835
836 /*
837 * Initialize our softc list lock prior to
838 * probing for any adapters.
839 */
840 ahd_list_lockinit();
841
842#ifdef CONFIG_PCI
843 error = ahd_linux_pci_init();
844 if (error)
845 return error;
846#endif
847
848 /*
849 * Register with the SCSI layer all
850 * controllers we've found.
851 */
852 found = 0;
853 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
854
855 if (ahd_linux_register_host(ahd, template) == 0)
856 found++;
857 }
858#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
859 spin_lock_irq(&io_request_lock);
860#endif
861 aic79xx_detect_complete++;
862 return 0;
863}
864
865#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
866/*
867 * Free the passed in Scsi_Host memory structures prior to unloading the
868 * module.
869 */
870static int
871ahd_linux_release(struct Scsi_Host * host)
872{
873 struct ahd_softc *ahd;
874 u_long l;
875
876 ahd_list_lock(&l);
877 if (host != NULL) {
878
879 /*
880 * We should be able to just perform
881 * the free directly, but check our
882 * list for extra sanity.
883 */
884 ahd = ahd_find_softc(*(struct ahd_softc **)host->hostdata);
885 if (ahd != NULL) {
886 u_long s;
887
888 ahd_lock(ahd, &s);
889 ahd_intr_enable(ahd, FALSE);
890 ahd_unlock(ahd, &s);
891 ahd_free(ahd);
892 }
893 }
894 ahd_list_unlock(&l);
895 return (0);
896}
897#endif
898
899/* 402/*
900 * Return a string describing the driver. 403 * Return a string describing the driver.
901 */ 404 */
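
The next hunk rewrites ahd_linux_queue() to drop the driver-private busy queues: per-device data now comes from the midlayer via scsi_transport_device_data(), and a frozen host simply returns SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues the command. A hedged sketch of that queuecommand shape (example_host and its frozen flag are invented):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

struct example_host {
        int frozen;
};

static int example_queuecommand(struct scsi_cmnd *cmd,
                                void (*done)(struct scsi_cmnd *))
{
        struct example_host *hp =
                (struct example_host *)cmd->device->host->hostdata;

        if (hp->frozen)
                return SCSI_MLQUEUE_HOST_BUSY;  /* midlayer will retry later */

        cmd->scsi_done = done;                  /* completion callback saved for later */
        /* ... build the controller command and hand it to the hardware ... */
        return 0;
}
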
@@ -928,220 +431,177 @@ ahd_linux_info(struct Scsi_Host *host)
928 * Queue an SCB to the controller. 431 * Queue an SCB to the controller.
929 */ 432 */
930static int 433static int
931ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *)) 434ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
932{ 435{
933 struct ahd_softc *ahd; 436 struct ahd_softc *ahd;
934 struct ahd_linux_device *dev; 437 struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
935 u_long flags;
936 438
937 ahd = *(struct ahd_softc **)cmd->device->host->hostdata; 439 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
938 440
939 /* 441 /*
940 * Save the callback on completion function.
941 */
942 cmd->scsi_done = scsi_done;
943
944 ahd_midlayer_entrypoint_lock(ahd, &flags);
945
946 /*
947 * Close the race of a command that was in the process of 442 * Close the race of a command that was in the process of
948 * being queued to us just as our simq was frozen. Let 443 * being queued to us just as our simq was frozen. Let
949 * DV commands through so long as we are only frozen to 444 * DV commands through so long as we are only frozen to
950 * perform DV. 445 * perform DV.
951 */ 446 */
952 if (ahd->platform_data->qfrozen != 0 447 if (ahd->platform_data->qfrozen != 0) {
953 && AHD_DV_CMD(cmd) == 0) { 448 printf("%s: queue frozen\n", ahd_name(ahd));
954 449
955 ahd_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ); 450 return SCSI_MLQUEUE_HOST_BUSY;
956 ahd_linux_queue_cmd_complete(ahd, cmd);
957 ahd_schedule_completeq(ahd);
958 ahd_midlayer_entrypoint_unlock(ahd, &flags);
959 return (0);
960 }
961 dev = ahd_linux_get_device(ahd, cmd->device->channel,
962 cmd->device->id, cmd->device->lun,
963 /*alloc*/TRUE);
964 if (dev == NULL) {
965 ahd_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
966 ahd_linux_queue_cmd_complete(ahd, cmd);
967 ahd_schedule_completeq(ahd);
968 ahd_midlayer_entrypoint_unlock(ahd, &flags);
969 printf("%s: aic79xx_linux_queue - Unable to allocate device!\n",
970 ahd_name(ahd));
971 return (0);
972 } 451 }
973 if (cmd->cmd_len > MAX_CDB_LEN) 452
974 return (-EINVAL); 453 /*
 454	 * Save the completion callback function.
455 */
456 cmd->scsi_done = scsi_done;
457
975 cmd->result = CAM_REQ_INPROG << 16; 458 cmd->result = CAM_REQ_INPROG << 16;
976 TAILQ_INSERT_TAIL(&dev->busyq, (struct ahd_cmd *)cmd, acmd_links.tqe); 459
977 if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) { 460 return ahd_linux_run_command(ahd, dev, cmd);
978 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links); 461}
979 dev->flags |= AHD_DEV_ON_RUN_LIST; 462
980 ahd_linux_run_device_queues(ahd); 463static inline struct scsi_target **
981 } 464ahd_linux_target_in_softc(struct scsi_target *starget)
982 ahd_midlayer_entrypoint_unlock(ahd, &flags); 465{
983 return (0); 466 struct ahd_softc *ahd =
467 *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
468 unsigned int target_offset;
469
470 target_offset = starget->id;
471 if (starget->channel != 0)
472 target_offset += 8;
473
474 return &ahd->platform_data->starget[target_offset];
984} 475}
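
The helper above flattens a (channel, id) pair into a single slot in the softc's starget array, adding 8 to the id for a non-zero channel. A minimal stand-alone sketch of that index math, purely illustrative (the function name and the two-channel, 8-wide assumption are not taken from the driver):

#include <assert.h>

/* Illustrative only: map a (channel, id) pair to a flat array slot,
 * mirroring the "id + 8 for a non-zero channel" layout used above. */
static unsigned int target_slot(unsigned int channel, unsigned int id)
{
        unsigned int slot = id;

        if (channel != 0)
                slot += 8;
        return slot;
}

int main(void)
{
        assert(target_slot(0, 3) == 3);   /* channel A, target 3 */
        assert(target_slot(1, 3) == 11);  /* channel B, target 3 */
        return 0;
}
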
985 476
986#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
987static int 477static int
988ahd_linux_slave_alloc(Scsi_Device *device) 478ahd_linux_target_alloc(struct scsi_target *starget)
989{ 479{
990 struct ahd_softc *ahd; 480 struct ahd_softc *ahd =
481 *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
482 unsigned long flags;
483 struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
484 struct ahd_linux_target *targ = scsi_transport_target_data(starget);
485 struct ahd_devinfo devinfo;
486 struct ahd_initiator_tinfo *tinfo;
487 struct ahd_tmode_tstate *tstate;
488 char channel = starget->channel + 'A';
991 489
992 ahd = *((struct ahd_softc **)device->host->hostdata); 490 ahd_lock(ahd, &flags);
993 if (bootverbose) 491
994 printf("%s: Slave Alloc %d\n", ahd_name(ahd), device->id); 492 BUG_ON(*ahd_targp != NULL);
995 return (0); 493
494 *ahd_targp = starget;
495 memset(targ, 0, sizeof(*targ));
496
497 tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
498 starget->id, &tstate);
499 ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id,
500 CAM_LUN_WILDCARD, channel,
501 ROLE_INITIATOR);
502 spi_min_period(starget) = AHD_SYNCRATE_MAX; /* We can do U320 */
503 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
504 spi_max_offset(starget) = MAX_OFFSET_PACED_BUG;
505 else
506 spi_max_offset(starget) = MAX_OFFSET_PACED;
507 spi_max_width(starget) = ahd->features & AHD_WIDE;
508
509 ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
510 AHD_TRANS_GOAL, /*paused*/FALSE);
511 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
512 AHD_TRANS_GOAL, /*paused*/FALSE);
513 ahd_unlock(ahd, &flags);
514
515 return 0;
516}
517
518static void
519ahd_linux_target_destroy(struct scsi_target *starget)
520{
521 struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
522
523 *ahd_targp = NULL;
996} 524}
997 525
998static int 526static int
999ahd_linux_slave_configure(Scsi_Device *device) 527ahd_linux_slave_alloc(struct scsi_device *sdev)
1000{ 528{
1001 struct ahd_softc *ahd; 529 struct ahd_softc *ahd =
1002 struct ahd_linux_device *dev; 530 *((struct ahd_softc **)sdev->host->hostdata);
1003 u_long flags; 531 struct scsi_target *starget = sdev->sdev_target;
532 struct ahd_linux_target *targ = scsi_transport_target_data(starget);
533 struct ahd_linux_device *dev;
1004 534
1005 ahd = *((struct ahd_softc **)device->host->hostdata);
1006 if (bootverbose) 535 if (bootverbose)
1007 printf("%s: Slave Configure %d\n", ahd_name(ahd), device->id); 536 printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
1008 ahd_midlayer_entrypoint_lock(ahd, &flags); 537
538 BUG_ON(targ->sdev[sdev->lun] != NULL);
539
540 dev = scsi_transport_device_data(sdev);
541 memset(dev, 0, sizeof(*dev));
542
1009 /* 543 /*
1010 * Since Linux has attached to the device, configure 544 * We start out life using untagged
1011 * it so we don't free and allocate the device 545 * transactions of which we allow one.
1012 * structure on every command.
1013 */ 546 */
1014 dev = ahd_linux_get_device(ahd, device->channel, 547 dev->openings = 1;
1015 device->id, device->lun, 548
1016 /*alloc*/TRUE); 549 /*
1017 if (dev != NULL) { 550 * Set maxtags to 0. This will be changed if we
1018 dev->flags &= ~AHD_DEV_UNCONFIGURED; 551 * later determine that we are dealing with
1019 dev->flags |= AHD_DEV_SLAVE_CONFIGURED; 552 * a tagged queuing capable device.
1020 dev->scsi_device = device; 553 */
1021 ahd_linux_device_queue_depth(ahd, dev); 554 dev->maxtags = 0;
1022 } 555
1023 ahd_midlayer_entrypoint_unlock(ahd, &flags); 556 targ->sdev[sdev->lun] = sdev;
557
1024 return (0); 558 return (0);
1025} 559}
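
The slave_alloc path above starts every device with a single untagged opening and maxtags of 0, raising the depth only once the device has proven it supports tagged queuing. A small stand-alone sketch of that policy, with invented field and function names (the real driver keeps this state in struct ahd_linux_device):

#include <stdio.h>

struct dev_limits {
        int openings;   /* commands we will currently accept      */
        int maxtags;    /* 0 until tagged queuing has been proven */
};

static void dev_init(struct dev_limits *d)
{
        d->openings = 1;        /* one untagged command at a time */
        d->maxtags  = 0;
}

static void dev_enable_tagged(struct dev_limits *d, int depth)
{
        d->maxtags  = depth;    /* device negotiated tagged queuing */
        d->openings = depth;
}

int main(void)
{
        struct dev_limits d;

        dev_init(&d);
        printf("untagged: openings=%d maxtags=%d\n", d.openings, d.maxtags);
        dev_enable_tagged(&d, 32);
        printf("tagged:   openings=%d maxtags=%d\n", d.openings, d.maxtags);
        return 0;
}
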
1026 560
1027static void 561static int
1028ahd_linux_slave_destroy(Scsi_Device *device) 562ahd_linux_slave_configure(struct scsi_device *sdev)
1029{ 563{
1030 struct ahd_softc *ahd; 564 struct ahd_softc *ahd;
1031 struct ahd_linux_device *dev;
1032 u_long flags;
1033 565
1034 ahd = *((struct ahd_softc **)device->host->hostdata); 566 ahd = *((struct ahd_softc **)sdev->host->hostdata);
1035 if (bootverbose) 567 if (bootverbose)
1036 printf("%s: Slave Destroy %d\n", ahd_name(ahd), device->id); 568 printf("%s: Slave Configure %d\n", ahd_name(ahd), sdev->id);
1037 ahd_midlayer_entrypoint_lock(ahd, &flags);
1038 dev = ahd_linux_get_device(ahd, device->channel,
1039 device->id, device->lun,
1040 /*alloc*/FALSE);
1041 569
1042 /* 570 ahd_linux_device_queue_depth(sdev);
1043 * Filter out "silly" deletions of real devices by only 571
1044 * deleting devices that have had slave_configure() 572 /* Initial Domain Validation */
1045 * called on them. All other devices that have not 573 if (!spi_initial_dv(sdev->sdev_target))
1046 * been configured will automatically be deleted by 574 spi_dv_device(sdev);
1047 * the refcounting process. 575
1048 */ 576 return 0;
1049 if (dev != NULL
1050 && (dev->flags & AHD_DEV_SLAVE_CONFIGURED) != 0) {
1051 dev->flags |= AHD_DEV_UNCONFIGURED;
1052 if (TAILQ_EMPTY(&dev->busyq)
1053 && dev->active == 0
1054 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
1055 ahd_linux_free_device(ahd, dev);
1056 }
1057 ahd_midlayer_entrypoint_unlock(ahd, &flags);
1058} 577}
1059#else 578
1060/*
1061 * Sets the queue depth for each SCSI device hanging
1062 * off the input host adapter.
1063 */
1064static void 579static void
1065ahd_linux_select_queue_depth(struct Scsi_Host * host, 580ahd_linux_slave_destroy(struct scsi_device *sdev)
1066 Scsi_Device * scsi_devs)
1067{ 581{
1068 Scsi_Device *device;
1069 Scsi_Device *ldev;
1070 struct ahd_softc *ahd; 582 struct ahd_softc *ahd;
1071 u_long flags; 583 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
584 struct ahd_linux_target *targ = scsi_transport_target_data(sdev->sdev_target);
1072 585
1073 ahd = *((struct ahd_softc **)host->hostdata); 586 ahd = *((struct ahd_softc **)sdev->host->hostdata);
1074 ahd_lock(ahd, &flags); 587 if (bootverbose)
1075 for (device = scsi_devs; device != NULL; device = device->next) { 588 printf("%s: Slave Destroy %d\n", ahd_name(ahd), sdev->id);
1076 589
1077 /* 590 BUG_ON(dev->active);
1078 * Watch out for duplicate devices. This works around
1079 * some quirks in how the SCSI scanning code does its
1080 * device management.
1081 */
1082 for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
1083 if (ldev->host == device->host
1084 && ldev->channel == device->channel
1085 && ldev->id == device->id
1086 && ldev->lun == device->lun)
1087 break;
1088 }
1089 /* Skip duplicate. */
1090 if (ldev != device)
1091 continue;
1092 591
1093 if (device->host == host) { 592 targ->sdev[sdev->lun] = NULL;
1094 struct ahd_linux_device *dev;
1095 593
1096 /*
1097 * Since Linux has attached to the device, configure
1098 * it so we don't free and allocate the device
1099 * structure on every command.
1100 */
1101 dev = ahd_linux_get_device(ahd, device->channel,
1102 device->id, device->lun,
1103 /*alloc*/TRUE);
1104 if (dev != NULL) {
1105 dev->flags &= ~AHD_DEV_UNCONFIGURED;
1106 dev->scsi_device = device;
1107 ahd_linux_device_queue_depth(ahd, dev);
1108 device->queue_depth = dev->openings
1109 + dev->active;
1110 if ((dev->flags & (AHD_DEV_Q_BASIC
1111 | AHD_DEV_Q_TAGGED)) == 0) {
1112 /*
1113 * We allow the OS to queue 2 untagged
1114 * transactions to us at any time even
1115 * though we can only execute them
1116 * serially on the controller/device.
1117 * This should remove some latency.
1118 */
1119 device->queue_depth = 2;
1120 }
1121 }
1122 }
1123 }
1124 ahd_unlock(ahd, &flags);
1125} 594}
1126#endif
1127 595
1128#if defined(__i386__) 596#if defined(__i386__)
1129/* 597/*
1130 * Return the disk geometry for the given SCSI device. 598 * Return the disk geometry for the given SCSI device.
1131 */ 599 */
1132static int 600static int
1133#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1134ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, 601ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1135 sector_t capacity, int geom[]) 602 sector_t capacity, int geom[])
1136{ 603{
1137 uint8_t *bh; 604 uint8_t *bh;
1138#else
1139ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1140{
1141 struct scsi_device *sdev = disk->device;
1142 u_long capacity = disk->capacity;
1143 struct buffer_head *bh;
1144#endif
1145 int heads; 605 int heads;
1146 int sectors; 606 int sectors;
1147 int cylinders; 607 int cylinders;
@@ -1151,22 +611,11 @@ ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1151 611
1152 ahd = *((struct ahd_softc **)sdev->host->hostdata); 612 ahd = *((struct ahd_softc **)sdev->host->hostdata);
1153 613
1154#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1155 bh = scsi_bios_ptable(bdev); 614 bh = scsi_bios_ptable(bdev);
1156#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
1157 bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
1158#else
1159 bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
1160#endif
1161
1162 if (bh) { 615 if (bh) {
1163 ret = scsi_partsize(bh, capacity, 616 ret = scsi_partsize(bh, capacity,
1164 &geom[2], &geom[0], &geom[1]); 617 &geom[2], &geom[0], &geom[1]);
1165#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1166 kfree(bh); 618 kfree(bh);
1167#else
1168 brelse(bh);
1169#endif
1170 if (ret != -1) 619 if (ret != -1)
1171 return (ret); 620 return (ret);
1172 } 621 }
@@ -1194,392 +643,35 @@ ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1194 * Abort the current SCSI command(s). 643 * Abort the current SCSI command(s).
1195 */ 644 */
1196static int 645static int
1197ahd_linux_abort(Scsi_Cmnd *cmd) 646ahd_linux_abort(struct scsi_cmnd *cmd)
1198{ 647{
1199 struct ahd_softc *ahd; 648 int error;
1200 struct ahd_cmd *acmd;
1201 struct ahd_cmd *list_acmd;
1202 struct ahd_linux_device *dev;
1203 struct scb *pending_scb;
1204 u_long s;
1205 u_int saved_scbptr;
1206 u_int active_scbptr;
1207 u_int last_phase;
1208 u_int cdb_byte;
1209 int retval;
1210 int was_paused;
1211 int paused;
1212 int wait;
1213 int disconnected;
1214 ahd_mode_state saved_modes;
1215
1216 pending_scb = NULL;
1217 paused = FALSE;
1218 wait = FALSE;
1219 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1220 acmd = (struct ahd_cmd *)cmd;
1221
1222 printf("%s:%d:%d:%d: Attempting to abort cmd %p:",
1223 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1224 cmd->device->lun, cmd);
1225 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
1226 printf(" 0x%x", cmd->cmnd[cdb_byte]);
1227 printf("\n");
1228
1229 /*
1230 * In all versions of Linux, we have to work around
1231 * a major flaw in how the mid-layer is locked down
1232 * if we are to sleep successfully in our error handler
1233 * while allowing our interrupt handler to run. Since
1234 * the midlayer acquires either the io_request_lock or
1235 * our lock prior to calling us, we must use the
1236 * spin_unlock_irq() method for unlocking our lock.
1237 * This will force interrupts to be enabled on the
1238 * current CPU. Since the EH thread should not have
1239 * been running with CPU interrupts disabled other than
1240 * by acquiring either the io_request_lock or our own
1241 * lock, this *should* be safe.
1242 */
1243 ahd_midlayer_entrypoint_lock(ahd, &s);
1244
1245 /*
1246 * First determine if we currently own this command.
1247 * Start by searching the device queue. If not found
1248 * there, check the pending_scb list. If not found
1249 * at all, and the system wanted us to just abort the
1250 * command, return success.
1251 */
1252 dev = ahd_linux_get_device(ahd, cmd->device->channel,
1253 cmd->device->id, cmd->device->lun,
1254 /*alloc*/FALSE);
1255
1256 if (dev == NULL) {
1257 /*
1258 * No target device for this command exists,
1259 * so we must not still own the command.
1260 */
1261 printf("%s:%d:%d:%d: Is not an active device\n",
1262 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1263 cmd->device->lun);
1264 retval = SUCCESS;
1265 goto no_cmd;
1266 }
1267
1268 TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
1269 if (list_acmd == acmd)
1270 break;
1271 }
1272
1273 if (list_acmd != NULL) {
1274 printf("%s:%d:%d:%d: Command found on device queue\n",
1275 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1276 cmd->device->lun);
1277 TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
1278 cmd->result = DID_ABORT << 16;
1279 ahd_linux_queue_cmd_complete(ahd, cmd);
1280 retval = SUCCESS;
1281 goto done;
1282 }
1283 649
1284 /* 650 error = ahd_linux_queue_recovery_cmd(cmd, SCB_ABORT);
1285 * See if we can find a matching cmd in the pending list. 651 if (error != 0)
1286 */ 652 printf("aic79xx_abort returns 0x%x\n", error);
1287 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 653 return error;
1288 if (pending_scb->io_ctx == cmd)
1289 break;
1290 }
1291
1292 if (pending_scb == NULL) {
1293 printf("%s:%d:%d:%d: Command not found\n",
1294 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1295 cmd->device->lun);
1296 goto no_cmd;
1297 }
1298
1299 if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
1300 /*
1301 * We can't queue two recovery actions using the same SCB
1302 */
1303 retval = FAILED;
1304 goto done;
1305 }
1306
1307 /*
1308 * Ensure that the card doesn't do anything
1309 * behind our back. Also make sure that we
1310 * didn't "just" miss an interrupt that would
1311 * affect this cmd.
1312 */
1313 was_paused = ahd_is_paused(ahd);
1314 ahd_pause_and_flushwork(ahd);
1315 paused = TRUE;
1316
1317 if ((pending_scb->flags & SCB_ACTIVE) == 0) {
1318 printf("%s:%d:%d:%d: Command already completed\n",
1319 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1320 cmd->device->lun);
1321 goto no_cmd;
1322 }
1323
1324 printf("%s: At time of recovery, card was %spaused\n",
1325 ahd_name(ahd), was_paused ? "" : "not ");
1326 ahd_dump_card_state(ahd);
1327
1328 disconnected = TRUE;
1329 if (ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A',
1330 cmd->device->lun, SCB_GET_TAG(pending_scb),
1331 ROLE_INITIATOR, CAM_REQ_ABORTED,
1332 SEARCH_COMPLETE) > 0) {
1333 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
1334 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1335 cmd->device->lun);
1336 retval = SUCCESS;
1337 goto done;
1338 }
1339
1340 saved_modes = ahd_save_modes(ahd);
1341 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1342 last_phase = ahd_inb(ahd, LASTPHASE);
1343 saved_scbptr = ahd_get_scbptr(ahd);
1344 active_scbptr = saved_scbptr;
1345 if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
1346 struct scb *bus_scb;
1347
1348 bus_scb = ahd_lookup_scb(ahd, active_scbptr);
1349 if (bus_scb == pending_scb)
1350 disconnected = FALSE;
1351 }
1352
1353 /*
1354 * At this point, pending_scb is the scb associated with the
1355 * passed in command. That command is currently active on the
1356 * bus or is in the disconnected state.
1357 */
1358 if (last_phase != P_BUSFREE
1359 && SCB_GET_TAG(pending_scb) == active_scbptr) {
1360
1361 /*
1362 * We're active on the bus, so assert ATN
1363 * and hope that the target responds.
1364 */
1365 pending_scb = ahd_lookup_scb(ahd, active_scbptr);
1366 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
1367 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1368 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1369 printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
1370 ahd_name(ahd), cmd->device->channel,
1371 cmd->device->id, cmd->device->lun);
1372 wait = TRUE;
1373 } else if (disconnected) {
1374
1375 /*
1376 * Actually re-queue this SCB in an attempt
1377 * to select the device before it reconnects.
1378 */
1379 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
1380 ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
1381 pending_scb->hscb->cdb_len = 0;
1382 pending_scb->hscb->task_attribute = 0;
1383 pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
1384
1385 if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
1386 /*
1387			 * Mark the SCB as having an outstanding
1388 * task management function. Should the command
1389 * complete normally before the task management
1390 * function can be sent, the host will be notified
1391 * to abort our requeued SCB.
1392 */
1393 ahd_outb(ahd, SCB_TASK_MANAGEMENT,
1394 pending_scb->hscb->task_management);
1395 } else {
1396 /*
1397 * If non-packetized, set the MK_MESSAGE control
1398 * bit indicating that we desire to send a message.
1399 * We also set the disconnected flag since there is
1400 * no guarantee that our SCB control byte matches
1401 * the version on the card. We don't want the
1402 * sequencer to abort the command thinking an
1403 * unsolicited reselection occurred.
1404 */
1405 pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1406
1407 /*
1408 * The sequencer will never re-reference the
1409 * in-core SCB. To make sure we are notified
1410			 * during reselection, set the MK_MESSAGE flag in
1411 * the card's copy of the SCB.
1412 */
1413 ahd_outb(ahd, SCB_CONTROL,
1414 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1415 }
1416
1417 /*
1418 * Clear out any entries in the QINFIFO first
1419 * so we are the next SCB for this target
1420 * to run.
1421 */
1422 ahd_search_qinfifo(ahd, cmd->device->id,
1423 cmd->device->channel + 'A', cmd->device->lun,
1424 SCB_LIST_NULL, ROLE_INITIATOR,
1425 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
1426 ahd_qinfifo_requeue_tail(ahd, pending_scb);
1427 ahd_set_scbptr(ahd, saved_scbptr);
1428 ahd_print_path(ahd, pending_scb);
1429 printf("Device is disconnected, re-queuing SCB\n");
1430 wait = TRUE;
1431 } else {
1432 printf("%s:%d:%d:%d: Unable to deliver message\n",
1433 ahd_name(ahd), cmd->device->channel,
1434 cmd->device->id, cmd->device->lun);
1435 retval = FAILED;
1436 goto done;
1437 }
1438
1439no_cmd:
1440 /*
1441 * Our assumption is that if we don't have the command, no
1442 * recovery action was required, so we return success. Again,
1443 * the semantics of the mid-layer recovery engine are not
1444 * well defined, so this may change in time.
1445 */
1446 retval = SUCCESS;
1447done:
1448 if (paused)
1449 ahd_unpause(ahd);
1450 if (wait) {
1451 struct timer_list timer;
1452 int ret;
1453
1454 pending_scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
1455 spin_unlock_irq(&ahd->platform_data->spin_lock);
1456 init_timer(&timer);
1457 timer.data = (u_long)pending_scb;
1458 timer.expires = jiffies + (5 * HZ);
1459 timer.function = ahd_linux_sem_timeout;
1460 add_timer(&timer);
1461 printf("Recovery code sleeping\n");
1462 down(&ahd->platform_data->eh_sem);
1463 printf("Recovery code awake\n");
1464 ret = del_timer_sync(&timer);
1465 if (ret == 0) {
1466 printf("Timer Expired\n");
1467 retval = FAILED;
1468 }
1469 spin_lock_irq(&ahd->platform_data->spin_lock);
1470 }
1471 ahd_schedule_runq(ahd);
1472 ahd_linux_run_complete_queue(ahd);
1473 ahd_midlayer_entrypoint_unlock(ahd, &s);
1474 return (retval);
1475}
1476
1477
1478static void
1479ahd_linux_dev_reset_complete(Scsi_Cmnd *cmd)
1480{
1481 free(cmd, M_DEVBUF);
1482} 654}
1483 655
1484/* 656/*
1485 * Attempt to send a target reset message to the device that timed out. 657 * Attempt to send a target reset message to the device that timed out.
1486 */ 658 */
1487static int 659static int
1488ahd_linux_dev_reset(Scsi_Cmnd *cmd) 660ahd_linux_dev_reset(struct scsi_cmnd *cmd)
1489{ 661{
1490 struct ahd_softc *ahd; 662 int error;
1491 struct scsi_cmnd *recovery_cmd;
1492 struct ahd_linux_device *dev;
1493 struct ahd_initiator_tinfo *tinfo;
1494 struct ahd_tmode_tstate *tstate;
1495 struct scb *scb;
1496 struct hardware_scb *hscb;
1497 u_long s;
1498 struct timer_list timer;
1499 int retval;
1500
1501 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1502 recovery_cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
1503 if (!recovery_cmd)
1504 return (FAILED);
1505 memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
1506 recovery_cmd->device = cmd->device;
1507 recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
1508#ifdef AHD_DEBUG
1509 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1510 printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
1511 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1512 cmd->device->lun, cmd);
1513#endif
1514 ahd_lock(ahd, &s);
1515
1516 dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
1517 cmd->device->lun, /*alloc*/FALSE);
1518 if (dev == NULL) {
1519 ahd_unlock(ahd, &s);
1520 kfree(recovery_cmd);
1521 return (FAILED);
1522 }
1523 if ((scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX)) == NULL) {
1524 ahd_unlock(ahd, &s);
1525 kfree(recovery_cmd);
1526 return (FAILED);
1527 }
1528 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
1529 cmd->device->id, &tstate);
1530 recovery_cmd->result = CAM_REQ_INPROG << 16;
1531 recovery_cmd->host_scribble = (char *)scb;
1532 scb->io_ctx = recovery_cmd;
1533 scb->platform_data->dev = dev;
1534 scb->sg_count = 0;
1535 ahd_set_residual(scb, 0);
1536 ahd_set_sense_residual(scb, 0);
1537 hscb = scb->hscb;
1538 hscb->control = 0;
1539 hscb->scsiid = BUILD_SCSIID(ahd, cmd);
1540 hscb->lun = cmd->device->lun;
1541 hscb->cdb_len = 0;
1542 hscb->task_management = SIU_TASKMGMT_LUN_RESET;
1543 scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
1544 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1545 scb->flags |= SCB_PACKETIZED;
1546 } else {
1547 hscb->control |= MK_MESSAGE;
1548 }
1549 dev->openings--;
1550 dev->active++;
1551 dev->commands_issued++;
1552 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1553 ahd_queue_scb(ahd, scb);
1554 663
1555 scb->platform_data->flags |= AHD_SCB_UP_EH_SEM; 664 error = ahd_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
1556 ahd_unlock(ahd, &s); 665 if (error != 0)
1557 init_timer(&timer); 666 printf("aic79xx_dev_reset returns 0x%x\n", error);
1558 timer.data = (u_long)scb; 667 return error;
1559 timer.expires = jiffies + (5 * HZ);
1560 timer.function = ahd_linux_sem_timeout;
1561 add_timer(&timer);
1562 printf("Recovery code sleeping\n");
1563 down(&ahd->platform_data->eh_sem);
1564 printf("Recovery code awake\n");
1565 retval = SUCCESS;
1566 if (del_timer_sync(&timer) == 0) {
1567 printf("Timer Expired\n");
1568 retval = FAILED;
1569 }
1570 ahd_lock(ahd, &s);
1571 ahd_schedule_runq(ahd);
1572 ahd_linux_run_complete_queue(ahd);
1573 ahd_unlock(ahd, &s);
1574 printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
1575 return (retval);
1576} 668}
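
Both error handlers now funnel into a single ahd_linux_queue_recovery_cmd() call and differ only in the SCB flag they pass (SCB_ABORT versus SCB_DEVICE_RESET). A minimal sketch of that "one recovery routine, parameterized by a flag" shape; the enum and helper names below are illustrative, not the driver's definitions:

#include <stdio.h>

enum recovery_flag { RECOVER_ABORT, RECOVER_DEVICE_RESET };

/* Stand-in for the shared recovery routine; returns 0 on success. */
static int queue_recovery(int cmd_tag, enum recovery_flag flag)
{
        printf("recovering tag %d via %s\n", cmd_tag,
               flag == RECOVER_ABORT ? "abort" : "device reset");
        return 0;
}

static int eh_abort(int cmd_tag)
{
        return queue_recovery(cmd_tag, RECOVER_ABORT);
}

static int eh_device_reset(int cmd_tag)
{
        return queue_recovery(cmd_tag, RECOVER_DEVICE_RESET);
}

int main(void)
{
        eh_abort(7);
        eh_device_reset(7);
        return 0;
}
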
1577 669
1578/* 670/*
1579 * Reset the SCSI bus. 671 * Reset the SCSI bus.
1580 */ 672 */
1581static int 673static int
1582ahd_linux_bus_reset(Scsi_Cmnd *cmd) 674ahd_linux_bus_reset(struct scsi_cmnd *cmd)
1583{ 675{
1584 struct ahd_softc *ahd; 676 struct ahd_softc *ahd;
1585 u_long s; 677 u_long s;
@@ -1594,7 +686,6 @@ ahd_linux_bus_reset(Scsi_Cmnd *cmd)
1594 ahd_lock(ahd, &s); 686 ahd_lock(ahd, &s);
1595 found = ahd_reset_channel(ahd, cmd->device->channel + 'A', 687 found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
1596 /*initiate reset*/TRUE); 688 /*initiate reset*/TRUE);
1597 ahd_linux_run_complete_queue(ahd);
1598 ahd_unlock(ahd, &s); 689 ahd_unlock(ahd, &s);
1599 690
1600 if (bootverbose) 691 if (bootverbose)
@@ -1604,9 +695,10 @@ ahd_linux_bus_reset(Scsi_Cmnd *cmd)
1604 return (SUCCESS); 695 return (SUCCESS);
1605} 696}
1606 697
1607Scsi_Host_Template aic79xx_driver_template = { 698struct scsi_host_template aic79xx_driver_template = {
1608 .module = THIS_MODULE, 699 .module = THIS_MODULE,
1609 .name = "aic79xx", 700 .name = "aic79xx",
701 .proc_name = "aic79xx",
1610 .proc_info = ahd_linux_proc_info, 702 .proc_info = ahd_linux_proc_info,
1611 .info = ahd_linux_info, 703 .info = ahd_linux_info,
1612 .queuecommand = ahd_linux_queue, 704 .queuecommand = ahd_linux_queue,
@@ -1623,37 +715,10 @@ Scsi_Host_Template aic79xx_driver_template = {
1623 .slave_alloc = ahd_linux_slave_alloc, 715 .slave_alloc = ahd_linux_slave_alloc,
1624 .slave_configure = ahd_linux_slave_configure, 716 .slave_configure = ahd_linux_slave_configure,
1625 .slave_destroy = ahd_linux_slave_destroy, 717 .slave_destroy = ahd_linux_slave_destroy,
718 .target_alloc = ahd_linux_target_alloc,
719 .target_destroy = ahd_linux_target_destroy,
1626}; 720};
1627 721
1628/**************************** Tasklet Handler *********************************/
1629
1630/*
1631 * In 2.4.X and above, this routine is called from a tasklet,
1632 * so we must re-acquire our lock prior to executing this code.
1633 * In all prior kernels, ahd_schedule_runq() calls this routine
1634 * directly and ahd_schedule_runq() is called with our lock held.
1635 */
1636static void
1637ahd_runq_tasklet(unsigned long data)
1638{
1639 struct ahd_softc* ahd;
1640 struct ahd_linux_device *dev;
1641 u_long flags;
1642
1643 ahd = (struct ahd_softc *)data;
1644 ahd_lock(ahd, &flags);
1645 while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
1646
1647 TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
1648 dev->flags &= ~AHD_DEV_ON_RUN_LIST;
1649 ahd_linux_check_device_queue(ahd, dev);
1650		/* Yield to our interrupt handler */
1651 ahd_unlock(ahd, &flags);
1652 ahd_lock(ahd, &flags);
1653 }
1654 ahd_unlock(ahd, &flags);
1655}
1656
1657/******************************** Bus DMA *************************************/ 722/******************************** Bus DMA *************************************/
1658int 723int
1659ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent, 724ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
@@ -1693,36 +758,10 @@ int
1693ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr, 758ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
1694 int flags, bus_dmamap_t *mapp) 759 int flags, bus_dmamap_t *mapp)
1695{ 760{
1696 bus_dmamap_t map;
1697
1698 map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
1699 if (map == NULL)
1700 return (ENOMEM);
1701 /*
1702 * Although we can dma data above 4GB, our
1703 * "consistent" memory is below 4GB for
1704	 * space efficiency reasons (only need a 4-byte
1705 * address). For this reason, we have to reset
1706 * our dma mask when doing allocations.
1707 */
1708 if (ahd->dev_softc != NULL)
1709 if (pci_set_dma_mask(ahd->dev_softc, 0xFFFFFFFF)) {
1710 printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
1711 kfree(map);
1712 return (ENODEV);
1713 }
1714 *vaddr = pci_alloc_consistent(ahd->dev_softc, 761 *vaddr = pci_alloc_consistent(ahd->dev_softc,
1715 dmat->maxsize, &map->bus_addr); 762 dmat->maxsize, mapp);
1716 if (ahd->dev_softc != NULL)
1717 if (pci_set_dma_mask(ahd->dev_softc,
1718 ahd->platform_data->hw_dma_mask)) {
1719 printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
1720 kfree(map);
1721 return (ENODEV);
1722 }
1723 if (*vaddr == NULL) 763 if (*vaddr == NULL)
1724 return (ENOMEM); 764 return (ENOMEM);
1725 *mapp = map;
1726 return(0); 765 return(0);
1727} 766}
1728 767
@@ -1731,7 +770,7 @@ ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
1731 void* vaddr, bus_dmamap_t map) 770 void* vaddr, bus_dmamap_t map)
1732{ 771{
1733 pci_free_consistent(ahd->dev_softc, dmat->maxsize, 772 pci_free_consistent(ahd->dev_softc, dmat->maxsize,
1734 vaddr, map->bus_addr); 773 vaddr, map);
1735} 774}
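
With the per-map bookkeeping removed, the allocation above reduces to a direct pci_alloc_consistent()/pci_free_consistent() pair, with the map handle now carrying the bus address itself. A hedged, kernel-style sketch of that 2.6-era pattern as it might appear in a driver; the wrapper names and the pdev/size parameters are placeholders, not code from this file:

#include <linux/pci.h>

/* Illustrative 2.6-era pattern: allocate a coherent buffer and hand the
 * bus address back through a dma_addr_t, mirroring the simplified
 * ahd_dmamem_alloc()/ahd_dmamem_free() above. */
static void *alloc_shared(struct pci_dev *pdev, size_t size, dma_addr_t *bus)
{
        return pci_alloc_consistent(pdev, size, bus);
}

static void free_shared(struct pci_dev *pdev, size_t size,
                        void *vaddr, dma_addr_t bus)
{
        pci_free_consistent(pdev, size, vaddr, bus);
}
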
1736 775
1737int 776int
@@ -1745,7 +784,7 @@ ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
1745 */ 784 */
1746 bus_dma_segment_t stack_sg; 785 bus_dma_segment_t stack_sg;
1747 786
1748 stack_sg.ds_addr = map->bus_addr; 787 stack_sg.ds_addr = map;
1749 stack_sg.ds_len = dmat->maxsize; 788 stack_sg.ds_len = dmat->maxsize;
1750 cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); 789 cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
1751 return (0); 790 return (0);
@@ -1754,11 +793,6 @@ ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
1754void 793void
1755ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) 794ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
1756{ 795{
1757 /*
1758	 * The map may be NULL in our < 2.3.X implementation.
1759 */
1760 if (map != NULL)
1761 free(map, M_DEVBUF);
1762} 796}
1763 797
1764int 798int
@@ -1823,41 +857,6 @@ ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1823} 857}
1824 858
1825static void 859static void
1826ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
1827{
1828
1829 if ((instance >= 0) && (targ >= 0)
1830 && (instance < NUM_ELEMENTS(aic79xx_tag_info))
1831 && (targ < AHD_NUM_TARGETS)) {
1832 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
1833 if (bootverbose)
1834 printf("tag_info[%d:%d] = %d\n", instance, targ, value);
1835 }
1836}
1837
1838static void
1839ahd_linux_setup_rd_strm_info(u_long arg, int instance, int targ, int32_t value)
1840{
1841 if ((instance >= 0)
1842 && (instance < NUM_ELEMENTS(aic79xx_rd_strm_info))) {
1843 aic79xx_rd_strm_info[instance] = value & 0xFFFF;
1844 if (bootverbose)
1845 printf("rd_strm[%d] = 0x%x\n", instance, value);
1846 }
1847}
1848
1849static void
1850ahd_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
1851{
1852 if ((instance >= 0)
1853 && (instance < NUM_ELEMENTS(aic79xx_dv_settings))) {
1854 aic79xx_dv_settings[instance] = value;
1855 if (bootverbose)
1856 printf("dv[%d] = %d\n", instance, value);
1857 }
1858}
1859
1860static void
1861ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value) 860ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
1862{ 861{
1863 862
@@ -1887,6 +886,99 @@ ahd_linux_setup_tag_info_global(char *p)
1887 } 886 }
1888} 887}
1889 888
889static void
890ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
891{
892
893 if ((instance >= 0) && (targ >= 0)
894 && (instance < NUM_ELEMENTS(aic79xx_tag_info))
895 && (targ < AHD_NUM_TARGETS)) {
896 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
897 if (bootverbose)
898 printf("tag_info[%d:%d] = %d\n", instance, targ, value);
899 }
900}
901
902static char *
903ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
904 void (*callback)(u_long, int, int, int32_t),
905 u_long callback_arg)
906{
907 char *tok_end;
908 char *tok_end2;
909 int i;
910 int instance;
911 int targ;
912 int done;
913 char tok_list[] = {'.', ',', '{', '}', '\0'};
914
915 /* All options use a ':' name/arg separator */
916 if (*opt_arg != ':')
917 return (opt_arg);
918 opt_arg++;
919 instance = -1;
920 targ = -1;
921 done = FALSE;
922 /*
923 * Restore separator that may be in
924 * the middle of our option argument.
925 */
926 tok_end = strchr(opt_arg, '\0');
927 if (tok_end < end)
928 *tok_end = ',';
929 while (!done) {
930 switch (*opt_arg) {
931 case '{':
932 if (instance == -1) {
933 instance = 0;
934 } else {
935 if (depth > 1) {
936 if (targ == -1)
937 targ = 0;
938 } else {
939 printf("Malformed Option %s\n",
940 opt_name);
941 done = TRUE;
942 }
943 }
944 opt_arg++;
945 break;
946 case '}':
947 if (targ != -1)
948 targ = -1;
949 else if (instance != -1)
950 instance = -1;
951 opt_arg++;
952 break;
953 case ',':
954 case '.':
955 if (instance == -1)
956 done = TRUE;
957 else if (targ >= 0)
958 targ++;
959 else if (instance >= 0)
960 instance++;
961 opt_arg++;
962 break;
963 case '\0':
964 done = TRUE;
965 break;
966 default:
967 tok_end = end;
968 for (i = 0; tok_list[i]; i++) {
969 tok_end2 = strchr(opt_arg, tok_list[i]);
970 if ((tok_end2) && (tok_end2 < tok_end))
971 tok_end = tok_end2;
972 }
973 callback(callback_arg, instance, targ,
974 simple_strtol(opt_arg, NULL, 0));
975 opt_arg = tok_end;
976 break;
977 }
978 }
979 return (opt_arg);
980}
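
ahd_parse_brace_option() above walks option strings of the form "name:{{v,v,v},{v,v}}", tracking an instance index and a target index as braces open and close and handing each value to a callback. A simplified user-space sketch of the same brace/comma walk, printing the (instance, target, value) triples instead of storing them; the '.' separator and the separator-restoration details of the real parser are omitted, and the function name is invented:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative re-implementation of the brace walk above for a
 * two-level option such as "{{8,8,8},{16,16}}". */
static void parse_braces(const char *s)
{
        int instance = -1, targ = -1;

        while (*s != '\0') {
                switch (*s) {
                case '{':
                        if (instance == -1)
                                instance = 0;
                        else if (targ == -1)
                                targ = 0;
                        s++;
                        break;
                case '}':
                        if (targ != -1)
                                targ = -1;
                        else if (instance != -1)
                                instance = -1;
                        s++;
                        break;
                case ',':
                        if (targ >= 0)
                                targ++;
                        else if (instance >= 0)
                                instance++;
                        s++;
                        break;
                default: {
                        char *end;
                        long value = strtol(s, &end, 0);

                        printf("instance %d, target %d -> %ld\n",
                               instance, targ, value);
                        s = end;
                        break;
                }
                }
        }
}

int main(void)
{
        parse_braces("{{8,8,8},{16,16}}");
        return 0;
}
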
981
1890/* 982/*
1891 * Handle Linux boot parameters. This routine allows for assigning a value 983 * Handle Linux boot parameters. This routine allows for assigning a value
1892 * to a parameter with a ':' between the parameter and the value. 984 * to a parameter with a ':' between the parameter and the value.
@@ -1916,8 +1008,6 @@ aic79xx_setup(char *s)
1916 { "seltime", &aic79xx_seltime }, 1008 { "seltime", &aic79xx_seltime },
1917 { "tag_info", NULL }, 1009 { "tag_info", NULL },
1918 { "global_tag_depth", NULL}, 1010 { "global_tag_depth", NULL},
1919 { "rd_strm", NULL },
1920 { "dv", NULL },
1921 { "slewrate", NULL }, 1011 { "slewrate", NULL },
1922 { "precomp", NULL }, 1012 { "precomp", NULL },
1923 { "amplitude", NULL }, 1013 { "amplitude", NULL },
@@ -1946,24 +1036,18 @@ aic79xx_setup(char *s)
1946 if (strncmp(p, "global_tag_depth", n) == 0) { 1036 if (strncmp(p, "global_tag_depth", n) == 0) {
1947 ahd_linux_setup_tag_info_global(p + n); 1037 ahd_linux_setup_tag_info_global(p + n);
1948 } else if (strncmp(p, "tag_info", n) == 0) { 1038 } else if (strncmp(p, "tag_info", n) == 0) {
1949 s = aic_parse_brace_option("tag_info", p + n, end, 1039 s = ahd_parse_brace_option("tag_info", p + n, end,
1950 2, ahd_linux_setup_tag_info, 0); 1040 2, ahd_linux_setup_tag_info, 0);
1951 } else if (strncmp(p, "rd_strm", n) == 0) {
1952 s = aic_parse_brace_option("rd_strm", p + n, end,
1953 1, ahd_linux_setup_rd_strm_info, 0);
1954 } else if (strncmp(p, "dv", n) == 0) {
1955 s = aic_parse_brace_option("dv", p + n, end, 1,
1956 ahd_linux_setup_dv, 0);
1957 } else if (strncmp(p, "slewrate", n) == 0) { 1041 } else if (strncmp(p, "slewrate", n) == 0) {
1958 s = aic_parse_brace_option("slewrate", 1042 s = ahd_parse_brace_option("slewrate",
1959 p + n, end, 1, ahd_linux_setup_iocell_info, 1043 p + n, end, 1, ahd_linux_setup_iocell_info,
1960 AIC79XX_SLEWRATE_INDEX); 1044 AIC79XX_SLEWRATE_INDEX);
1961 } else if (strncmp(p, "precomp", n) == 0) { 1045 } else if (strncmp(p, "precomp", n) == 0) {
1962 s = aic_parse_brace_option("precomp", 1046 s = ahd_parse_brace_option("precomp",
1963 p + n, end, 1, ahd_linux_setup_iocell_info, 1047 p + n, end, 1, ahd_linux_setup_iocell_info,
1964 AIC79XX_PRECOMP_INDEX); 1048 AIC79XX_PRECOMP_INDEX);
1965 } else if (strncmp(p, "amplitude", n) == 0) { 1049 } else if (strncmp(p, "amplitude", n) == 0) {
1966 s = aic_parse_brace_option("amplitude", 1050 s = ahd_parse_brace_option("amplitude",
1967 p + n, end, 1, ahd_linux_setup_iocell_info, 1051 p + n, end, 1, ahd_linux_setup_iocell_info,
1968 AIC79XX_AMPLITUDE_INDEX); 1052 AIC79XX_AMPLITUDE_INDEX);
1969 } else if (p[n] == ':') { 1053 } else if (p[n] == ':') {
@@ -1982,13 +1066,12 @@ __setup("aic79xx=", aic79xx_setup);
1982uint32_t aic79xx_verbose; 1066uint32_t aic79xx_verbose;
1983 1067
1984int 1068int
1985ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template) 1069ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template)
1986{ 1070{
1987 char buf[80]; 1071 char buf[80];
1988 struct Scsi_Host *host; 1072 struct Scsi_Host *host;
1989 char *new_name; 1073 char *new_name;
1990 u_long s; 1074 u_long s;
1991 u_long target;
1992 1075
1993 template->name = ahd->description; 1076 template->name = ahd->description;
1994 host = scsi_host_alloc(template, sizeof(struct ahd_softc *)); 1077 host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
@@ -1997,11 +1080,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
1997 1080
1998 *((struct ahd_softc **)host->hostdata) = ahd; 1081 *((struct ahd_softc **)host->hostdata) = ahd;
1999 ahd_lock(ahd, &s); 1082 ahd_lock(ahd, &s);
2000#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2001 scsi_assign_lock(host, &ahd->platform_data->spin_lock); 1083 scsi_assign_lock(host, &ahd->platform_data->spin_lock);
2002#elif AHD_SCSI_HAS_HOST_LOCK != 0
2003 host->lock = &ahd->platform_data->spin_lock;
2004#endif
2005 ahd->platform_data->host = host; 1084 ahd->platform_data->host = host;
2006 host->can_queue = AHD_MAX_QUEUE; 1085 host->can_queue = AHD_MAX_QUEUE;
2007 host->cmd_per_lun = 2; 1086 host->cmd_per_lun = 2;
@@ -2012,7 +1091,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
2012 host->max_lun = AHD_NUM_LUNS; 1091 host->max_lun = AHD_NUM_LUNS;
2013 host->max_channel = 0; 1092 host->max_channel = 0;
2014 host->sg_tablesize = AHD_NSEG; 1093 host->sg_tablesize = AHD_NSEG;
2015 ahd_set_unit(ahd, ahd_linux_next_unit()); 1094 ahd_set_unit(ahd, ahd_linux_unit++);
2016 sprintf(buf, "scsi%d", host->host_no); 1095 sprintf(buf, "scsi%d", host->host_no);
2017 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 1096 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
2018 if (new_name != NULL) { 1097 if (new_name != NULL) {
@@ -2020,54 +1099,14 @@ ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
2020 ahd_set_name(ahd, new_name); 1099 ahd_set_name(ahd, new_name);
2021 } 1100 }
2022 host->unique_id = ahd->unit; 1101 host->unique_id = ahd->unit;
2023#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2024 scsi_set_pci_device(host, ahd->dev_softc);
2025#endif
2026 ahd_linux_setup_user_rd_strm_settings(ahd);
2027 ahd_linux_initialize_scsi_bus(ahd); 1102 ahd_linux_initialize_scsi_bus(ahd);
2028 ahd_unlock(ahd, &s);
2029 ahd->platform_data->dv_pid = kernel_thread(ahd_linux_dv_thread, ahd, 0);
2030 ahd_lock(ahd, &s);
2031 if (ahd->platform_data->dv_pid < 0) {
2032 printf("%s: Failed to create DV thread, error= %d\n",
2033 ahd_name(ahd), ahd->platform_data->dv_pid);
2034 return (-ahd->platform_data->dv_pid);
2035 }
2036 /*
2037 * Initially allocate *all* of our linux target objects
2038 * so that the DV thread will scan them all in parallel
2039 * just after driver initialization. Any device that
2040 * does not exist will have its target object destroyed
2041 * by the selection timeout handler. In the case of a
2042 * device that appears after the initial DV scan, async
2043 * negotiation will occur for the first command, and DV
2044	 * will commence should that first command be successful.
2045 */
2046 for (target = 0; target < host->max_id; target++) {
2047
2048 /*
2049 * Skip our own ID. Some Compaq/HP storage devices
2050 * have enclosure management devices that respond to
2051 * single bit selection (i.e. selecting ourselves).
2052 * It is expected that either an external application
2053 * or a modified kernel will be used to probe this
2054 * ID if it is appropriate. To accommodate these
2055		 * installations, ahd_linux_alloc_target() will allocate
2056 * for our ID if asked to do so.
2057 */
2058 if (target == ahd->our_id)
2059 continue;
2060
2061 ahd_linux_alloc_target(ahd, 0, target);
2062 }
2063 ahd_intr_enable(ahd, TRUE); 1103 ahd_intr_enable(ahd, TRUE);
2064 ahd_linux_start_dv(ahd);
2065 ahd_unlock(ahd, &s); 1104 ahd_unlock(ahd, &s);
2066 1105
2067#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1106 host->transportt = ahd_linux_transport_template;
1107
2068 scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */ 1108 scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */
2069 scsi_scan_host(host); 1109 scsi_scan_host(host);
2070#endif
2071 return (0); 1110 return (0);
2072} 1111}
2073 1112
@@ -2081,29 +1120,6 @@ ahd_linux_get_memsize(void)
2081} 1120}
2082 1121
2083/* 1122/*
2084 * Find the smallest available unit number to use
2085 * for a new device. We don't just use a static
2086 * count to handle the "repeated hot-(un)plug"
2087 * scenario.
2088 */
2089static int
2090ahd_linux_next_unit(void)
2091{
2092 struct ahd_softc *ahd;
2093 int unit;
2094
2095 unit = 0;
2096retry:
2097 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
2098 if (ahd->unit == unit) {
2099 unit++;
2100 goto retry;
2101 }
2102 }
2103 return (unit);
2104}
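
The removed ahd_linux_next_unit() searched the softc list for the smallest unit number not already in use, so that a hot-unplugged controller's number could be reused, rather than relying on a monotonically increasing counter as the new code now does. A stand-alone sketch of that search over a plain array; the list walk is simplified for illustration:

#include <stdio.h>

/* Return the smallest non-negative integer not present in used[],
 * mirroring the retry loop in the removed ahd_linux_next_unit(). */
static int next_unit(const int *used, int count)
{
        int unit = 0;
        int i;

retry:
        for (i = 0; i < count; i++) {
                if (used[i] == unit) {
                        unit++;
                        goto retry;
                }
        }
        return unit;
}

int main(void)
{
        int in_use[] = { 0, 1, 3 };

        printf("next unit: %d\n", next_unit(in_use, 3));  /* prints 2 */
        return 0;
}
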
2105
2106/*
2107 * Place the SCSI bus into a known state by either resetting it, 1123 * Place the SCSI bus into a known state by either resetting it,
2108 * or forcing transfer negotiations on the next command to any 1124 * or forcing transfer negotiations on the next command to any
2109 * target. 1125 * target.
@@ -2162,20 +1178,9 @@ ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
2162 if (ahd->platform_data == NULL) 1178 if (ahd->platform_data == NULL)
2163 return (ENOMEM); 1179 return (ENOMEM);
2164 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data)); 1180 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
2165 TAILQ_INIT(&ahd->platform_data->completeq);
2166 TAILQ_INIT(&ahd->platform_data->device_runq);
2167 ahd->platform_data->irq = AHD_LINUX_NOIRQ; 1181 ahd->platform_data->irq = AHD_LINUX_NOIRQ;
2168 ahd->platform_data->hw_dma_mask = 0xFFFFFFFF;
2169 ahd_lockinit(ahd); 1182 ahd_lockinit(ahd);
2170 ahd_done_lockinit(ahd);
2171 init_timer(&ahd->platform_data->completeq_timer);
2172 ahd->platform_data->completeq_timer.data = (u_long)ahd;
2173 ahd->platform_data->completeq_timer.function =
2174 (ahd_linux_callback_t *)ahd_linux_thread_run_complete_queue;
2175 init_MUTEX_LOCKED(&ahd->platform_data->eh_sem); 1183 init_MUTEX_LOCKED(&ahd->platform_data->eh_sem);
2176 init_MUTEX_LOCKED(&ahd->platform_data->dv_sem);
2177 init_MUTEX_LOCKED(&ahd->platform_data->dv_cmd_sem);
2178 ahd_setup_runq_tasklet(ahd);
2179 ahd->seltime = (aic79xx_seltime & 0x3) << 4; 1184 ahd->seltime = (aic79xx_seltime & 0x3) << 4;
2180 return (0); 1185 return (0);
2181} 1186}
@@ -2183,39 +1188,27 @@ ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
2183void 1188void
2184ahd_platform_free(struct ahd_softc *ahd) 1189ahd_platform_free(struct ahd_softc *ahd)
2185{ 1190{
2186 struct ahd_linux_target *targ; 1191 struct scsi_target *starget;
2187 struct ahd_linux_device *dev;
2188 int i, j; 1192 int i, j;
2189 1193
2190 if (ahd->platform_data != NULL) { 1194 if (ahd->platform_data != NULL) {
2191 del_timer_sync(&ahd->platform_data->completeq_timer);
2192 ahd_linux_kill_dv_thread(ahd);
2193 ahd_teardown_runq_tasklet(ahd);
2194 if (ahd->platform_data->host != NULL) { 1195 if (ahd->platform_data->host != NULL) {
2195#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2196 scsi_remove_host(ahd->platform_data->host); 1196 scsi_remove_host(ahd->platform_data->host);
2197#endif
2198 scsi_host_put(ahd->platform_data->host); 1197 scsi_host_put(ahd->platform_data->host);
2199 } 1198 }
2200 1199
2201 /* destroy all of the device and target objects */ 1200 /* destroy all of the device and target objects */
2202 for (i = 0; i < AHD_NUM_TARGETS; i++) { 1201 for (i = 0; i < AHD_NUM_TARGETS; i++) {
2203 targ = ahd->platform_data->targets[i]; 1202 starget = ahd->platform_data->starget[i];
2204 if (targ != NULL) { 1203 if (starget != NULL) {
2205 /* Keep target around through the loop. */
2206 targ->refcount++;
2207 for (j = 0; j < AHD_NUM_LUNS; j++) { 1204 for (j = 0; j < AHD_NUM_LUNS; j++) {
2208 1205 struct ahd_linux_target *targ =
2209 if (targ->devices[j] == NULL) 1206 scsi_transport_target_data(starget);
1207 if (targ->sdev[j] == NULL)
2210 continue; 1208 continue;
2211 dev = targ->devices[j]; 1209 targ->sdev[j] = NULL;
2212 ahd_linux_free_device(ahd, dev);
2213 } 1210 }
2214 /* 1211 ahd->platform_data->starget[i] = NULL;
2215 * Forcibly free the target now that
2216 * all devices are gone.
2217 */
2218 ahd_linux_free_target(ahd, targ);
2219 } 1212 }
2220 } 1213 }
2221 1214
@@ -2233,16 +1226,6 @@ ahd_platform_free(struct ahd_softc *ahd)
2233 release_mem_region(ahd->platform_data->mem_busaddr, 1226 release_mem_region(ahd->platform_data->mem_busaddr,
2234 0x1000); 1227 0x1000);
2235 } 1228 }
2236#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2237 /*
2238 * In 2.4 we detach from the scsi midlayer before the PCI
2239 * layer invokes our remove callback. No per-instance
2240 * detach is provided, so we must reach inside the PCI
2241 * subsystem's internals and detach our driver manually.
2242 */
2243 if (ahd->dev_softc != NULL)
2244 ahd->dev_softc->driver = NULL;
2245#endif
2246 free(ahd->platform_data, M_DEVBUF); 1229 free(ahd->platform_data, M_DEVBUF);
2247 } 1230 }
2248} 1231}
@@ -2280,13 +1263,22 @@ void
2280ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 1263ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2281 ahd_queue_alg alg) 1264 ahd_queue_alg alg)
2282{ 1265{
1266 struct scsi_target *starget;
1267 struct ahd_linux_target *targ;
2283 struct ahd_linux_device *dev; 1268 struct ahd_linux_device *dev;
1269 struct scsi_device *sdev;
2284 int was_queuing; 1270 int was_queuing;
2285 int now_queuing; 1271 int now_queuing;
2286 1272
2287 dev = ahd_linux_get_device(ahd, devinfo->channel - 'A', 1273 starget = ahd->platform_data->starget[devinfo->target];
2288 devinfo->target, 1274 targ = scsi_transport_target_data(starget);
2289 devinfo->lun, /*alloc*/FALSE); 1275 BUG_ON(targ == NULL);
1276 sdev = targ->sdev[devinfo->lun];
1277 if (sdev == NULL)
1278 return;
1279
1280 dev = scsi_transport_device_data(sdev);
1281
2290 if (dev == NULL) 1282 if (dev == NULL)
2291 return; 1283 return;
2292 was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED); 1284 was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
@@ -2339,1434 +1331,37 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2339 dev->maxtags = 0; 1331 dev->maxtags = 0;
2340 dev->openings = 1 - dev->active; 1332 dev->openings = 1 - dev->active;
2341 } 1333 }
2342#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2343 if (dev->scsi_device != NULL) {
2344 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
2345 case AHD_DEV_Q_BASIC:
2346 scsi_adjust_queue_depth(dev->scsi_device,
2347 MSG_SIMPLE_TASK,
2348 dev->openings + dev->active);
2349 break;
2350 case AHD_DEV_Q_TAGGED:
2351 scsi_adjust_queue_depth(dev->scsi_device,
2352 MSG_ORDERED_TASK,
2353 dev->openings + dev->active);
2354 break;
2355 default:
2356 /*
2357 * We allow the OS to queue 2 untagged transactions to
2358 * us at any time even though we can only execute them
2359 * serially on the controller/device. This should
2360 * remove some latency.
2361 */
2362 scsi_adjust_queue_depth(dev->scsi_device,
2363 /*NON-TAGGED*/0,
2364 /*queue depth*/2);
2365 break;
2366 }
2367 }
2368#endif
2369}
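
The scsi_adjust_queue_depth() switch removed just above survives, reworked to operate on the scsi_device, at the end of this hunk: tagged devices get their negotiated openings plus whatever is already active, while untagged devices are held to a fixed depth of 2 to hide some latency. A small sketch of that decision; the flag macros and function name below are invented stand-ins for AHD_DEV_Q_BASIC/AHD_DEV_Q_TAGGED and the surrounding driver state:

#include <stdio.h>

#define DEV_Q_BASIC   0x01   /* simple-tag queuing  */
#define DEV_Q_TAGGED  0x02   /* ordered-tag queuing */

/* Pick the depth the mid-layer should queue to a device, mirroring the
 * queue-depth switch in ahd_platform_set_tags(): tagged devices get
 * their negotiated openings, untagged devices a fixed depth of 2. */
static int pick_queue_depth(int flags, int openings, int active)
{
        if (flags & (DEV_Q_BASIC | DEV_Q_TAGGED))
                return openings + active;
        return 2;
}

int main(void)
{
        printf("tagged:   %d\n", pick_queue_depth(DEV_Q_TAGGED, 30, 2));
        printf("untagged: %d\n", pick_queue_depth(0, 1, 0));
        return 0;
}
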
2370
2371int
2372ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
2373 int lun, u_int tag, role_t role, uint32_t status)
2374{
2375 int targ;
2376 int maxtarg;
2377 int maxlun;
2378 int clun;
2379 int count;
2380
2381 if (tag != SCB_LIST_NULL)
2382 return (0);
2383
2384 targ = 0;
2385 if (target != CAM_TARGET_WILDCARD) {
2386 targ = target;
2387 maxtarg = targ + 1;
2388 } else {
2389 maxtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
2390 }
2391 clun = 0;
2392 if (lun != CAM_LUN_WILDCARD) {
2393 clun = lun;
2394 maxlun = clun + 1;
2395 } else {
2396 maxlun = AHD_NUM_LUNS;
2397 }
2398
2399 count = 0;
2400 for (; targ < maxtarg; targ++) {
2401
2402 for (; clun < maxlun; clun++) {
2403 struct ahd_linux_device *dev;
2404 struct ahd_busyq *busyq;
2405 struct ahd_cmd *acmd;
2406
2407 dev = ahd_linux_get_device(ahd, /*chan*/0, targ,
2408 clun, /*alloc*/FALSE);
2409 if (dev == NULL)
2410 continue;
2411
2412 busyq = &dev->busyq;
2413 while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
2414 Scsi_Cmnd *cmd;
2415
2416 cmd = &acmd_scsi_cmd(acmd);
2417 TAILQ_REMOVE(busyq, acmd,
2418 acmd_links.tqe);
2419 count++;
2420 cmd->result = status << 16;
2421 ahd_linux_queue_cmd_complete(ahd, cmd);
2422 }
2423 }
2424 }
2425
2426 return (count);
2427}
2428
2429static void
2430ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd)
2431{
2432 u_long flags;
2433
2434 ahd_lock(ahd, &flags);
2435 del_timer(&ahd->platform_data->completeq_timer);
2436 ahd->platform_data->flags &= ~AHD_RUN_CMPLT_Q_TIMER;
2437 ahd_linux_run_complete_queue(ahd);
2438 ahd_unlock(ahd, &flags);
2439}
2440
2441static void
2442ahd_linux_start_dv(struct ahd_softc *ahd)
2443{
2444
2445 /*
2446 * Freeze the simq and signal ahd_linux_queue to not let any
2447 * more commands through
2448 */
2449 if ((ahd->platform_data->flags & AHD_DV_ACTIVE) == 0) {
2450#ifdef AHD_DEBUG
2451 if (ahd_debug & AHD_SHOW_DV)
2452 printf("%s: Starting DV\n", ahd_name(ahd));
2453#endif
2454
2455 ahd->platform_data->flags |= AHD_DV_ACTIVE;
2456 ahd_freeze_simq(ahd);
2457
2458 /* Wake up the DV kthread */
2459 up(&ahd->platform_data->dv_sem);
2460 }
2461}
2462
2463static int
2464ahd_linux_dv_thread(void *data)
2465{
2466 struct ahd_softc *ahd;
2467 int target;
2468 u_long s;
2469
2470 ahd = (struct ahd_softc *)data;
2471
2472#ifdef AHD_DEBUG
2473 if (ahd_debug & AHD_SHOW_DV)
2474 printf("In DV Thread\n");
2475#endif
2476
2477 /*
2478 * Complete thread creation.
2479 */
2480 lock_kernel();
2481#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
2482 /*
2483 * Don't care about any signals.
2484 */
2485 siginitsetinv(&current->blocked, 0);
2486
2487 daemonize();
2488 sprintf(current->comm, "ahd_dv_%d", ahd->unit);
2489#else
2490 daemonize("ahd_dv_%d", ahd->unit);
2491 current->flags |= PF_NOFREEZE;
2492#endif
2493 unlock_kernel();
2494
2495 while (1) {
2496 /*
2497 * Use down_interruptible() rather than down() to
2498 * avoid inclusion in the load average.
2499 */
2500 down_interruptible(&ahd->platform_data->dv_sem);
2501
2502 /* Check to see if we've been signaled to exit */
2503 ahd_lock(ahd, &s);
2504 if ((ahd->platform_data->flags & AHD_DV_SHUTDOWN) != 0) {
2505 ahd_unlock(ahd, &s);
2506 break;
2507 }
2508 ahd_unlock(ahd, &s);
2509
2510#ifdef AHD_DEBUG
2511 if (ahd_debug & AHD_SHOW_DV)
2512 printf("%s: Beginning Domain Validation\n",
2513 ahd_name(ahd));
2514#endif
2515
2516 /*
2517 * Wait for any pending commands to drain before proceeding.
2518 */
2519 ahd_lock(ahd, &s);
2520 while (LIST_FIRST(&ahd->pending_scbs) != NULL) {
2521 ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_EMPTY;
2522 ahd_unlock(ahd, &s);
2523 down_interruptible(&ahd->platform_data->dv_sem);
2524 ahd_lock(ahd, &s);
2525 }
2526
2527 /*
2528 * Wait for the SIMQ to be released so that DV is the
2529 * only reason the queue is frozen.
2530 */
2531 while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
2532 ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
2533 ahd_unlock(ahd, &s);
2534 down_interruptible(&ahd->platform_data->dv_sem);
2535 ahd_lock(ahd, &s);
2536 }
2537 ahd_unlock(ahd, &s);
2538
2539 for (target = 0; target < AHD_NUM_TARGETS; target++)
2540 ahd_linux_dv_target(ahd, target);
2541
2542 ahd_lock(ahd, &s);
2543 ahd->platform_data->flags &= ~AHD_DV_ACTIVE;
2544 ahd_unlock(ahd, &s);
2545
2546 /*
2547 * Release the SIMQ so that normal commands are
2548 * allowed to continue on the bus.
2549 */
2550 ahd_release_simq(ahd);
2551 }
2552 up(&ahd->platform_data->eh_sem);
2553 return (0);
2554}
2555
2556static void
2557ahd_linux_kill_dv_thread(struct ahd_softc *ahd)
2558{
2559 u_long s;
2560
2561 ahd_lock(ahd, &s);
2562 if (ahd->platform_data->dv_pid != 0) {
2563 ahd->platform_data->flags |= AHD_DV_SHUTDOWN;
2564 ahd_unlock(ahd, &s);
2565 up(&ahd->platform_data->dv_sem);
2566
2567 /*
2568 * Use the eh_sem as an indicator that the
2569 * dv thread is exiting. Note that the dv
2570 * thread must still return after performing
2571 * the up on our semaphore before it has
2572 * completely exited this module. Unfortunately,
2573 * there seems to be no easy way to wait for the
2574 * exit of a thread for which you are not the
2575 * parent (dv threads are parented by init).
2576 * Cross your fingers...
2577 */
2578 down(&ahd->platform_data->eh_sem);
2579
2580 /*
2581 * Mark the dv thread as already dead. This
2582 * avoids attempting to kill it a second time.
2583 * This is necessary because we must kill the
2584 * DV thread before calling ahd_free() in the
2585 * module shutdown case to avoid bogus locking
2587		 * in the SCSI mid-layer, but ahd_free() is
2587 * called without killing the DV thread in the
2588 * instance detach case, so ahd_platform_free()
2589 * calls us again to verify that the DV thread
2590 * is dead.
2591 */
2592 ahd->platform_data->dv_pid = 0;
2593 } else {
2594 ahd_unlock(ahd, &s);
2595 }
2596}
2597
2598#define AHD_LINUX_DV_INQ_SHORT_LEN 36
2599#define AHD_LINUX_DV_INQ_LEN 256
2600#define AHD_LINUX_DV_TIMEOUT (HZ / 4)
2601
2602#define AHD_SET_DV_STATE(ahd, targ, newstate) \
2603 ahd_set_dv_state(ahd, targ, newstate, __LINE__)
2604
2605static __inline void
2606ahd_set_dv_state(struct ahd_softc *ahd, struct ahd_linux_target *targ,
2607 ahd_dv_state newstate, u_int line)
2608{
2609 ahd_dv_state oldstate;
2610
2611 oldstate = targ->dv_state;
2612#ifdef AHD_DEBUG
2613 if (ahd_debug & AHD_SHOW_DV)
2614 printf("%s:%d: Going from state %d to state %d\n",
2615 ahd_name(ahd), line, oldstate, newstate);
2616#endif
2617
2618 if (oldstate == newstate)
2619 targ->dv_state_retry++;
2620 else
2621 targ->dv_state_retry = 0;
2622 targ->dv_state = newstate;
2623}
2624
2625static void
2626ahd_linux_dv_target(struct ahd_softc *ahd, u_int target_offset)
2627{
2628 struct ahd_devinfo devinfo;
2629 struct ahd_linux_target *targ;
2630 struct scsi_cmnd *cmd;
2631 struct scsi_device *scsi_dev;
2632 struct scsi_sense_data *sense;
2633 uint8_t *buffer;
2634 u_long s;
2635 u_int timeout;
2636 int echo_size;
2637
2638 sense = NULL;
2639 buffer = NULL;
2640 echo_size = 0;
2641 ahd_lock(ahd, &s);
2642 targ = ahd->platform_data->targets[target_offset];
2643 if (targ == NULL || (targ->flags & AHD_DV_REQUIRED) == 0) {
2644 ahd_unlock(ahd, &s);
2645 return;
2646 }
2647 ahd_compile_devinfo(&devinfo, ahd->our_id, targ->target, /*lun*/0,
2648 targ->channel + 'A', ROLE_INITIATOR);
2649#ifdef AHD_DEBUG
2650 if (ahd_debug & AHD_SHOW_DV) {
2651 ahd_print_devinfo(ahd, &devinfo);
2652 printf("Performing DV\n");
2653 }
2654#endif
2655
2656 ahd_unlock(ahd, &s);
2657
2658 cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
2659 scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
2660 scsi_dev->host = ahd->platform_data->host;
2661 scsi_dev->id = devinfo.target;
2662 scsi_dev->lun = devinfo.lun;
2663 scsi_dev->channel = devinfo.channel - 'A';
2664 ahd->platform_data->dv_scsi_dev = scsi_dev;
2665
2666 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_SHORT_ASYNC);
2667
2668 while (targ->dv_state != AHD_DV_STATE_EXIT) {
2669 timeout = AHD_LINUX_DV_TIMEOUT;
2670 switch (targ->dv_state) {
2671 case AHD_DV_STATE_INQ_SHORT_ASYNC:
2672 case AHD_DV_STATE_INQ_ASYNC:
2673 case AHD_DV_STATE_INQ_ASYNC_VERIFY:
2674 /*
2675 * Set things to async narrow to reduce the
2676 * chance that the INQ will fail.
2677 */
2678 ahd_lock(ahd, &s);
2679 ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
2680 AHD_TRANS_GOAL, /*paused*/FALSE);
2681 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2682 AHD_TRANS_GOAL, /*paused*/FALSE);
2683 ahd_unlock(ahd, &s);
2684 timeout = 10 * HZ;
2685 targ->flags &= ~AHD_INQ_VALID;
2686 /* FALLTHROUGH */
2687 case AHD_DV_STATE_INQ_VERIFY:
2688 {
2689 u_int inq_len;
2690
2691 if (targ->dv_state == AHD_DV_STATE_INQ_SHORT_ASYNC)
2692 inq_len = AHD_LINUX_DV_INQ_SHORT_LEN;
2693 else
2694 inq_len = targ->inq_data->additional_length + 5;
2695 ahd_linux_dv_inq(ahd, cmd, &devinfo, targ, inq_len);
2696 break;
2697 }
2698 case AHD_DV_STATE_TUR:
2699 case AHD_DV_STATE_BUSY:
2700 timeout = 5 * HZ;
2701 ahd_linux_dv_tur(ahd, cmd, &devinfo);
2702 break;
2703 case AHD_DV_STATE_REBD:
2704 ahd_linux_dv_rebd(ahd, cmd, &devinfo, targ);
2705 break;
2706 case AHD_DV_STATE_WEB:
2707 ahd_linux_dv_web(ahd, cmd, &devinfo, targ);
2708 break;
2709
2710 case AHD_DV_STATE_REB:
2711 ahd_linux_dv_reb(ahd, cmd, &devinfo, targ);
2712 break;
2713
2714 case AHD_DV_STATE_SU:
2715 ahd_linux_dv_su(ahd, cmd, &devinfo, targ);
2716 timeout = 50 * HZ;
2717 break;
2718
2719 default:
2720 ahd_print_devinfo(ahd, &devinfo);
2721 printf("Unknown DV state %d\n", targ->dv_state);
2722 goto out;
2723 }
2724
2725 /* Queue the command and wait for it to complete */
2726 /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
2727 init_timer(&cmd->eh_timeout);
2728#ifdef AHD_DEBUG
2729 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2730 /*
2731 * All of the printfs during negotiation
2732 * really slow down the negotiation.
2733 * Add a bit of time just to be safe.
2734 */
2735 timeout += HZ;
2736#endif
2737 scsi_add_timer(cmd, timeout, ahd_linux_dv_timeout);
2738 /*
2739 * In 2.5.X, it is assumed that all calls from the
2740 * "midlayer" (which we are emulating) will have the
2741 * ahd host lock held. For other kernels, the
2742 * io_request_lock must be held.
2743 */
2744#if AHD_SCSI_HAS_HOST_LOCK != 0
2745 ahd_lock(ahd, &s);
2746#else
2747 spin_lock_irqsave(&io_request_lock, s);
2748#endif
2749 ahd_linux_queue(cmd, ahd_linux_dv_complete);
2750#if AHD_SCSI_HAS_HOST_LOCK != 0
2751 ahd_unlock(ahd, &s);
2752#else
2753 spin_unlock_irqrestore(&io_request_lock, s);
2754#endif
2755 down_interruptible(&ahd->platform_data->dv_cmd_sem);
2756 /*
2757 * Wait for the SIMQ to be released so that DV is the
2758 * only reason the queue is frozen.
2759 */
2760 ahd_lock(ahd, &s);
2761 while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
2762 ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
2763 ahd_unlock(ahd, &s);
2764 down_interruptible(&ahd->platform_data->dv_sem);
2765 ahd_lock(ahd, &s);
2766 }
2767 ahd_unlock(ahd, &s);
2768
2769 ahd_linux_dv_transition(ahd, cmd, &devinfo, targ);
2770 }
2771
2772out:
2773 if ((targ->flags & AHD_INQ_VALID) != 0
2774 && ahd_linux_get_device(ahd, devinfo.channel - 'A',
2775 devinfo.target, devinfo.lun,
2776 /*alloc*/FALSE) == NULL) {
2777 /*
2778 * The DV state machine failed to configure this device.
2779 * This is normal if DV is disabled. Since we have inquiry
2780 * data, filter it and use the "optimistic" negotiation
2781 * parameters found in the inquiry string.
2782 */
2783 ahd_linux_filter_inquiry(ahd, &devinfo);
2784 if ((targ->flags & (AHD_BASIC_DV|AHD_ENHANCED_DV)) != 0) {
2785 ahd_print_devinfo(ahd, &devinfo);
2786 printf("DV failed to configure device. "
2787 "Please file a bug report against "
2788 "this driver.\n");
2789 }
2790 }
2791
2792 if (cmd != NULL)
2793 free(cmd, M_DEVBUF);
2794
2795 if (ahd->platform_data->dv_scsi_dev != NULL) {
2796 free(ahd->platform_data->dv_scsi_dev, M_DEVBUF);
2797 ahd->platform_data->dv_scsi_dev = NULL;
2798 }
2799
2800 ahd_lock(ahd, &s);
2801 if (targ->dv_buffer != NULL) {
2802 free(targ->dv_buffer, M_DEVBUF);
2803 targ->dv_buffer = NULL;
2804 }
2805 if (targ->dv_buffer1 != NULL) {
2806 free(targ->dv_buffer1, M_DEVBUF);
2807 targ->dv_buffer1 = NULL;
2808 }
2809 targ->flags &= ~AHD_DV_REQUIRED;
2810 if (targ->refcount == 0)
2811 ahd_linux_free_target(ahd, targ);
2812 ahd_unlock(ahd, &s);
2813}
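
A minimal user-space sketch of the forward path ahd_linux_dv_target() drives through the DV states; retry, BUSY and fallback transitions are omitted, and the ordering shown is illustrative rather than the driver's enum order:

#include <stdio.h>

int main(void)
{
	static const char *happy_path[] = {
		"INQ_SHORT_ASYNC",	/* short INQUIRY at async/narrow */
		"INQ_ASYNC",		/* full-length INQUIRY, still async */
		"INQ_ASYNC_VERIFY",	/* re-read and compare the INQUIRY data */
		"TUR",			/* TEST UNIT READY clears transient state */
		"REBD",			/* READ BUFFER descriptor: echo buffer size */
		"WEB",			/* WRITE BUFFER: send the generated pattern */
		"REB",			/* READ BUFFER: read it back and compare */
		"EXIT",
	};
	unsigned i;

	/* Each real iteration issues one command, sleeps on dv_cmd_sem,
	 * then lets ahd_linux_dv_transition() pick the next state. */
	for (i = 0; i < sizeof(happy_path) / sizeof(happy_path[0]); i++)
		printf("DV step %u: %s\n", i, happy_path[i]);
	return 0;
}
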
2814
2815static __inline int
2816ahd_linux_dv_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2817{
2818 u_long s;
2819 int retval;
2820
2821 ahd_lock(ahd, &s);
2822 retval = ahd_linux_fallback(ahd, devinfo);
2823 ahd_unlock(ahd, &s);
2824
2825 return (retval);
2826}
2827
2828static void
2829ahd_linux_dv_transition(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
2830 struct ahd_devinfo *devinfo,
2831 struct ahd_linux_target *targ)
2832{
2833 u_int32_t status;
2834
2835 status = aic_error_action(cmd, targ->inq_data,
2836 ahd_cmd_get_transaction_status(cmd),
2837 ahd_cmd_get_scsi_status(cmd));
2838
2839
2840#ifdef AHD_DEBUG
2841 if (ahd_debug & AHD_SHOW_DV) {
2842 ahd_print_devinfo(ahd, devinfo);
2843 printf("Entering ahd_linux_dv_transition, state= %d, "
2844 "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
2845 status, cmd->result);
2846 }
2847#endif
 1334
 1335 	switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
 1336 	case AHD_DEV_Q_BASIC:
 1337 		scsi_adjust_queue_depth(sdev,
 1338 				MSG_SIMPLE_TASK,
 1339 				dev->openings + dev->active);
2848
2849 	switch (targ->dv_state) {
2850 	case AHD_DV_STATE_INQ_SHORT_ASYNC:
2851 	case AHD_DV_STATE_INQ_ASYNC:
2852 		switch (status & SS_MASK) {
2853 		case SS_NOP:
2854 {
2855 AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
2856 break;
2857 }
2858 case SS_INQ_REFRESH:
2859 AHD_SET_DV_STATE(ahd, targ,
2860 AHD_DV_STATE_INQ_SHORT_ASYNC);
2861 break;
2862 case SS_TUR:
2863 case SS_RETRY:
2864 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2865 if (ahd_cmd_get_transaction_status(cmd)
2866 == CAM_REQUEUE_REQ)
2867 targ->dv_state_retry--;
2868 if ((status & SS_ERRMASK) == EBUSY)
2869 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
2870 if (targ->dv_state_retry < 10)
2871 break;
2872 /* FALLTHROUGH */
2873 default:
2874 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2875#ifdef AHD_DEBUG
2876 if (ahd_debug & AHD_SHOW_DV) {
2877 ahd_print_devinfo(ahd, devinfo);
2878 printf("Failed DV inquiry, skipping\n");
2879 }
2880#endif
2881 break;
2882 }
 1340 		break;
 1341 	case AHD_DEV_Q_TAGGED:
 1342 		scsi_adjust_queue_depth(sdev,
 1343 				MSG_ORDERED_TASK,
 1344 				dev->openings + dev->active);
2883 		break;
2884 	case AHD_DV_STATE_INQ_ASYNC_VERIFY:
2885 		switch (status & SS_MASK) {
2886 		case SS_NOP:
2887 		{
2888 u_int xportflags;
2889 u_int spi3data;
2890
2891 if (memcmp(targ->inq_data, targ->dv_buffer,
2892 AHD_LINUX_DV_INQ_LEN) != 0) {
2893 /*
2894 * Inquiry data must have changed.
2895 * Try from the top again.
2896 */
2897 AHD_SET_DV_STATE(ahd, targ,
2898 AHD_DV_STATE_INQ_SHORT_ASYNC);
2899 break;
2900 }
2901
2902 AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
2903 targ->flags |= AHD_INQ_VALID;
2904 if (ahd_linux_user_dv_setting(ahd) == 0)
2905 break;
2906
2907 xportflags = targ->inq_data->flags;
2908 if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
2909 break;
2910
2911 spi3data = targ->inq_data->spi3data;
2912 switch (spi3data & SID_SPI_CLOCK_DT_ST) {
2913 default:
2914 case SID_SPI_CLOCK_ST:
2915 /* Assume only basic DV is supported. */
2916 targ->flags |= AHD_BASIC_DV;
2917 break;
2918 case SID_SPI_CLOCK_DT:
2919 case SID_SPI_CLOCK_DT_ST:
2920 targ->flags |= AHD_ENHANCED_DV;
2921 break;
2922 }
2923 break;
2924 }
2925 case SS_INQ_REFRESH:
2926 AHD_SET_DV_STATE(ahd, targ,
2927 AHD_DV_STATE_INQ_SHORT_ASYNC);
2928 break;
2929 case SS_TUR:
2930 case SS_RETRY:
2931 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2932 if (ahd_cmd_get_transaction_status(cmd)
2933 == CAM_REQUEUE_REQ)
2934 targ->dv_state_retry--;
2935
2936 if ((status & SS_ERRMASK) == EBUSY)
2937 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
2938 if (targ->dv_state_retry < 10)
2939 break;
2940 /* FALLTHROUGH */
2941 default:
2942 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2943#ifdef AHD_DEBUG
2944 if (ahd_debug & AHD_SHOW_DV) {
2945 ahd_print_devinfo(ahd, devinfo);
2946 printf("Failed DV inquiry, skipping\n");
2947 }
2948#endif
2949 break;
2950 }
2951 break; 1345 break;
2952 case AHD_DV_STATE_INQ_VERIFY:
2953 switch (status & SS_MASK) {
2954 case SS_NOP:
2955 {
2956
2957 if (memcmp(targ->inq_data, targ->dv_buffer,
2958 AHD_LINUX_DV_INQ_LEN) == 0) {
2959 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2960 break;
2961 }
2962
2963#ifdef AHD_DEBUG
2964 if (ahd_debug & AHD_SHOW_DV) {
2965 int i;
2966
2967 ahd_print_devinfo(ahd, devinfo);
2968 printf("Inquiry buffer mismatch:");
2969 for (i = 0; i < AHD_LINUX_DV_INQ_LEN; i++) {
2970 if ((i & 0xF) == 0)
2971 printf("\n ");
2972 printf("0x%x:0x0%x ",
2973 ((uint8_t *)targ->inq_data)[i],
2974 targ->dv_buffer[i]);
2975 }
2976 printf("\n");
2977 }
2978#endif
2979
2980 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
2981 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2982 break;
2983 }
2984 /*
2985 * Do not count "falling back"
2986 * against our retries.
2987 */
2988 targ->dv_state_retry = 0;
2989 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2990 break;
2991 }
2992 case SS_INQ_REFRESH:
2993 AHD_SET_DV_STATE(ahd, targ,
2994 AHD_DV_STATE_INQ_SHORT_ASYNC);
2995 break;
2996 case SS_TUR:
2997 case SS_RETRY:
2998 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2999 if (ahd_cmd_get_transaction_status(cmd)
3000 == CAM_REQUEUE_REQ) {
3001 targ->dv_state_retry--;
3002 } else if ((status & SSQ_FALLBACK) != 0) {
3003 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3004 AHD_SET_DV_STATE(ahd, targ,
3005 AHD_DV_STATE_EXIT);
3006 break;
3007 }
3008 /*
3009 * Do not count "falling back"
3010 * against our retries.
3011 */
3012 targ->dv_state_retry = 0;
3013 } else if ((status & SS_ERRMASK) == EBUSY)
3014 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3015 if (targ->dv_state_retry < 10)
3016 break;
3017 /* FALLTHROUGH */
3018 default:
3019 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3020#ifdef AHD_DEBUG
3021 if (ahd_debug & AHD_SHOW_DV) {
3022 ahd_print_devinfo(ahd, devinfo);
3023 printf("Failed DV inquiry, skipping\n");
3024 }
3025#endif
3026 break;
3027 }
3028 break;
3029
3030 case AHD_DV_STATE_TUR:
3031 switch (status & SS_MASK) {
3032 case SS_NOP:
3033 if ((targ->flags & AHD_BASIC_DV) != 0) {
3034 ahd_linux_filter_inquiry(ahd, devinfo);
3035 AHD_SET_DV_STATE(ahd, targ,
3036 AHD_DV_STATE_INQ_VERIFY);
3037 } else if ((targ->flags & AHD_ENHANCED_DV) != 0) {
3038 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REBD);
3039 } else {
3040 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3041 }
3042 break;
3043 case SS_RETRY:
3044 case SS_TUR:
3045 if ((status & SS_ERRMASK) == EBUSY) {
3046 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3047 break;
3048 }
3049 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3050 if (ahd_cmd_get_transaction_status(cmd)
3051 == CAM_REQUEUE_REQ) {
3052 targ->dv_state_retry--;
3053 } else if ((status & SSQ_FALLBACK) != 0) {
3054 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3055 AHD_SET_DV_STATE(ahd, targ,
3056 AHD_DV_STATE_EXIT);
3057 break;
3058 }
3059 /*
3060 * Do not count "falling back"
3061 * against our retries.
3062 */
3063 targ->dv_state_retry = 0;
3064 }
3065 if (targ->dv_state_retry >= 10) {
3066#ifdef AHD_DEBUG
3067 if (ahd_debug & AHD_SHOW_DV) {
3068 ahd_print_devinfo(ahd, devinfo);
3069 printf("DV TUR retries exhausted\n");
3070 }
3071#endif
3072 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3073 break;
3074 }
3075 if (status & SSQ_DELAY)
3076 ssleep(1);
3077
3078 break;
3079 case SS_START:
3080 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_SU);
3081 break;
3082 case SS_INQ_REFRESH:
3083 AHD_SET_DV_STATE(ahd, targ,
3084 AHD_DV_STATE_INQ_SHORT_ASYNC);
3085 break;
3086 default:
3087 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3088 break;
3089 }
3090 break;
3091
3092 case AHD_DV_STATE_REBD:
3093 switch (status & SS_MASK) {
3094 case SS_NOP:
3095 {
3096 uint32_t echo_size;
3097
3098 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
3099 echo_size = scsi_3btoul(&targ->dv_buffer[1]);
3100 echo_size &= 0x1FFF;
3101#ifdef AHD_DEBUG
3102 if (ahd_debug & AHD_SHOW_DV) {
3103 ahd_print_devinfo(ahd, devinfo);
3104 printf("Echo buffer size= %d\n", echo_size);
3105 }
3106#endif
3107 if (echo_size == 0) {
3108 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3109 break;
3110 }
3111
3112 /* Generate the buffer pattern */
3113 targ->dv_echo_size = echo_size;
3114 ahd_linux_generate_dv_pattern(targ);
3115 /*
3116 * Setup initial negotiation values.
3117 */
3118 ahd_linux_filter_inquiry(ahd, devinfo);
3119 break;
3120 }
3121 case SS_INQ_REFRESH:
3122 AHD_SET_DV_STATE(ahd, targ,
3123 AHD_DV_STATE_INQ_SHORT_ASYNC);
3124 break;
3125 case SS_RETRY:
3126 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3127 if (ahd_cmd_get_transaction_status(cmd)
3128 == CAM_REQUEUE_REQ)
3129 targ->dv_state_retry--;
3130 if (targ->dv_state_retry <= 10)
3131 break;
3132#ifdef AHD_DEBUG
3133 if (ahd_debug & AHD_SHOW_DV) {
3134 ahd_print_devinfo(ahd, devinfo);
3135 printf("DV REBD retries exhausted\n");
3136 }
3137#endif
3138 /* FALLTHROUGH */
3139 case SS_FATAL:
3140 default:
3141 /*
3142 * Setup initial negotiation values
3143 * and try level 1 DV.
3144 */
3145 ahd_linux_filter_inquiry(ahd, devinfo);
3146 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_VERIFY);
3147 targ->dv_echo_size = 0;
3148 break;
3149 }
3150 break;
3151
3152 case AHD_DV_STATE_WEB:
3153 switch (status & SS_MASK) {
3154 case SS_NOP:
3155 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REB);
3156 break;
3157 case SS_INQ_REFRESH:
3158 AHD_SET_DV_STATE(ahd, targ,
3159 AHD_DV_STATE_INQ_SHORT_ASYNC);
3160 break;
3161 case SS_RETRY:
3162 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3163 if (ahd_cmd_get_transaction_status(cmd)
3164 == CAM_REQUEUE_REQ) {
3165 targ->dv_state_retry--;
3166 } else if ((status & SSQ_FALLBACK) != 0) {
3167 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3168 AHD_SET_DV_STATE(ahd, targ,
3169 AHD_DV_STATE_EXIT);
3170 break;
3171 }
3172 /*
3173 * Do not count "falling back"
3174 * against our retries.
3175 */
3176 targ->dv_state_retry = 0;
3177 }
3178 if (targ->dv_state_retry <= 10)
3179 break;
3180 /* FALLTHROUGH */
3181#ifdef AHD_DEBUG
3182 if (ahd_debug & AHD_SHOW_DV) {
3183 ahd_print_devinfo(ahd, devinfo);
3184 printf("DV WEB retries exhausted\n");
3185 }
3186#endif
3187 default:
3188 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3189 break;
3190 }
3191 break;
3192
3193 case AHD_DV_STATE_REB:
3194 switch (status & SS_MASK) {
3195 case SS_NOP:
3196 if (memcmp(targ->dv_buffer, targ->dv_buffer1,
3197 targ->dv_echo_size) != 0) {
3198 if (ahd_linux_dv_fallback(ahd, devinfo) != 0)
3199 AHD_SET_DV_STATE(ahd, targ,
3200 AHD_DV_STATE_EXIT);
3201 else
3202 AHD_SET_DV_STATE(ahd, targ,
3203 AHD_DV_STATE_WEB);
3204 break;
3205 }
3206
3207 if (targ->dv_buffer != NULL) {
3208 free(targ->dv_buffer, M_DEVBUF);
3209 targ->dv_buffer = NULL;
3210 }
3211 if (targ->dv_buffer1 != NULL) {
3212 free(targ->dv_buffer1, M_DEVBUF);
3213 targ->dv_buffer1 = NULL;
3214 }
3215 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3216 break;
3217 case SS_INQ_REFRESH:
3218 AHD_SET_DV_STATE(ahd, targ,
3219 AHD_DV_STATE_INQ_SHORT_ASYNC);
3220 break;
3221 case SS_RETRY:
3222 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3223 if (ahd_cmd_get_transaction_status(cmd)
3224 == CAM_REQUEUE_REQ) {
3225 targ->dv_state_retry--;
3226 } else if ((status & SSQ_FALLBACK) != 0) {
3227 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3228 AHD_SET_DV_STATE(ahd, targ,
3229 AHD_DV_STATE_EXIT);
3230 break;
3231 }
3232 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
3233 }
3234 if (targ->dv_state_retry <= 10) {
3235 if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
3236 msleep(ahd->our_id*1000/10);
3237 break;
3238 }
3239#ifdef AHD_DEBUG
3240 if (ahd_debug & AHD_SHOW_DV) {
3241 ahd_print_devinfo(ahd, devinfo);
3242 printf("DV REB retries exhausted\n");
3243 }
3244#endif
3245 /* FALLTHROUGH */
3246 default:
3247 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3248 break;
3249 }
3250 break;
3251
3252 case AHD_DV_STATE_SU:
3253 switch (status & SS_MASK) {
3254 case SS_NOP:
3255 case SS_INQ_REFRESH:
3256 AHD_SET_DV_STATE(ahd, targ,
3257 AHD_DV_STATE_INQ_SHORT_ASYNC);
3258 break;
3259 default:
3260 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3261 break;
3262 }
3263 break;
3264
3265 case AHD_DV_STATE_BUSY:
3266 switch (status & SS_MASK) {
3267 case SS_NOP:
3268 case SS_INQ_REFRESH:
3269 AHD_SET_DV_STATE(ahd, targ,
3270 AHD_DV_STATE_INQ_SHORT_ASYNC);
3271 break;
3272 case SS_TUR:
3273 case SS_RETRY:
3274 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3275 if (ahd_cmd_get_transaction_status(cmd)
3276 == CAM_REQUEUE_REQ) {
3277 targ->dv_state_retry--;
3278 } else if (targ->dv_state_retry < 60) {
3279 if ((status & SSQ_DELAY) != 0)
3280 ssleep(1);
3281 } else {
3282#ifdef AHD_DEBUG
3283 if (ahd_debug & AHD_SHOW_DV) {
3284 ahd_print_devinfo(ahd, devinfo);
3285 printf("DV BUSY retries exhausted\n");
3286 }
3287#endif
3288 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3289 }
3290 break;
3291 default:
3292 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3293 break;
3294 }
3295 break;
3296
3297 default: 1346 default:
3298 printf("%s: Invalid DV completion state %d\n", ahd_name(ahd),
3299 targ->dv_state);
3300 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3301 break;
3302 }
3303}
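
The completion handling above keys off a packed status word from aic_error_action(): one action code selected with SS_MASK plus modifier flags such as SSQ_FALLBACK and SSQ_DELAY. A standalone sketch of that decode pattern; the EX_* bit values below are invented for illustration and do not match the driver's aiclib definitions:

#include <stdint.h>
#include <stdio.h>

#define EX_SS_MASK	0x00ffu		/* hypothetical action field */
#define EX_SS_NOP	0x0000u
#define EX_SS_RETRY	0x0001u
#define EX_SSQ_FALLBACK	0x0100u		/* hypothetical modifier flags */
#define EX_SSQ_DELAY	0x0200u

static void ex_handle(uint32_t status)
{
	/* Action code first, then the modifier bits, as in the switch above. */
	switch (status & EX_SS_MASK) {
	case EX_SS_NOP:
		printf("advance to the next DV state\n");
		break;
	case EX_SS_RETRY:
		if (status & EX_SSQ_FALLBACK)
			printf("drop to a slower transfer agreement first\n");
		if (status & EX_SSQ_DELAY)
			printf("sleep briefly before retrying\n");
		printf("retry the current DV state\n");
		break;
	default:
		printf("give up and exit DV\n");
		break;
	}
}

int main(void)
{
	ex_handle(EX_SS_NOP);
	ex_handle(EX_SS_RETRY | EX_SSQ_FALLBACK | EX_SSQ_DELAY);
	return 0;
}
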
3304
3305static void
3306ahd_linux_dv_fill_cmd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3307 struct ahd_devinfo *devinfo)
3308{
3309 memset(cmd, 0, sizeof(struct scsi_cmnd));
3310 cmd->device = ahd->platform_data->dv_scsi_dev;
3311 cmd->scsi_done = ahd_linux_dv_complete;
3312}
3313
3314/*
3315 * Synthesize an inquiry command. On the return trip, it'll be
3316 * sniffed and the device transfer settings set for us.
3317 */
3318static void
3319ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3320 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ,
3321 u_int request_length)
3322{
3323
3324#ifdef AHD_DEBUG
3325 if (ahd_debug & AHD_SHOW_DV) {
3326 ahd_print_devinfo(ahd, devinfo);
3327 printf("Sending INQ\n");
3328 }
3329#endif
3330 if (targ->inq_data == NULL)
3331 targ->inq_data = malloc(AHD_LINUX_DV_INQ_LEN,
3332 M_DEVBUF, M_WAITOK);
3333 if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC) {
3334 if (targ->dv_buffer != NULL)
3335 free(targ->dv_buffer, M_DEVBUF);
3336 targ->dv_buffer = malloc(AHD_LINUX_DV_INQ_LEN,
3337 M_DEVBUF, M_WAITOK);
3338 }
3339
3340 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3341 cmd->sc_data_direction = DMA_FROM_DEVICE;
3342 cmd->cmd_len = 6;
3343 cmd->cmnd[0] = INQUIRY;
3344 cmd->cmnd[4] = request_length;
3345 cmd->request_bufflen = request_length;
3346 if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC)
3347 cmd->request_buffer = targ->dv_buffer;
3348 else
3349 cmd->request_buffer = targ->inq_data;
3350 memset(cmd->request_buffer, 0, AHD_LINUX_DV_INQ_LEN);
3351}
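
For reference, the 6-byte INQUIRY CDB built above, shown in isolation; the short-INQUIRY length of 36 bytes is an assumption here, the driver's actual AHD_LINUX_DV_INQ_SHORT_LEN is defined elsewhere in this file:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_INQUIRY		0x12	/* standard SCSI INQUIRY opcode */
#define EX_DV_INQ_SHORT_LEN	36	/* assumed short INQUIRY length */

int main(void)
{
	uint8_t cdb[6];
	int i;

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = EX_INQUIRY;
	cdb[4] = EX_DV_INQ_SHORT_LEN;	/* allocation length, as in cmnd[4] above */

	for (i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}
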
3352
3353static void
3354ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3355 struct ahd_devinfo *devinfo)
3356{
3357
3358#ifdef AHD_DEBUG
3359 if (ahd_debug & AHD_SHOW_DV) {
3360 ahd_print_devinfo(ahd, devinfo);
3361 printf("Sending TUR\n");
3362 }
3363#endif
3364 /* Do a TUR to clear out any non-fatal transitional state */
3365 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3366 cmd->sc_data_direction = DMA_NONE;
3367 cmd->cmd_len = 6;
3368 cmd->cmnd[0] = TEST_UNIT_READY;
3369}
3370
3371#define AHD_REBD_LEN 4
3372
3373static void
3374ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3375 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3376{
3377
3378#ifdef AHD_DEBUG
3379 if (ahd_debug & AHD_SHOW_DV) {
3380 ahd_print_devinfo(ahd, devinfo);
3381 printf("Sending REBD\n");
3382 }
3383#endif
3384 if (targ->dv_buffer != NULL)
3385 free(targ->dv_buffer, M_DEVBUF);
3386 targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
3387 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3388 cmd->sc_data_direction = DMA_FROM_DEVICE;
3389 cmd->cmd_len = 10;
3390 cmd->cmnd[0] = READ_BUFFER;
3391 cmd->cmnd[1] = 0x0b;
3392 scsi_ulto3b(AHD_REBD_LEN, &cmd->cmnd[6]);
3393 cmd->request_bufflen = AHD_REBD_LEN;
3394 cmd->underflow = cmd->request_bufflen;
3395 cmd->request_buffer = targ->dv_buffer;
3396}
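
The 4-byte descriptor returned by this READ BUFFER (mode 0x0b) command is decoded in the REBD completion case above; a self-contained sketch of that decode, with scsi_3btoul() open-coded and an invented example descriptor:

#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalent of scsi_3btoul(): SCSI multi-byte fields are
 * big-endian. */
static uint32_t ex_3btoul(const uint8_t *b)
{
	return ((uint32_t)b[0] << 16) | ((uint32_t)b[1] << 8) | b[2];
}

int main(void)
{
	/* Example descriptor as a target might return it: 0x001000 bytes. */
	uint8_t desc[4] = { 0x00, 0x00, 0x10, 0x00 };
	uint32_t echo_size;

	/* Bytes 1..3 hold the capacity; same 0x1FFF mask as the REBD handler. */
	echo_size = ex_3btoul(&desc[1]) & 0x1FFF;
	printf("echo buffer size = %u bytes\n", (unsigned)echo_size);
	return 0;
}
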
3397
3398static void
3399ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3400 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3401{
3402
3403#ifdef AHD_DEBUG
3404 if (ahd_debug & AHD_SHOW_DV) {
3405 ahd_print_devinfo(ahd, devinfo);
3406 printf("Sending WEB\n");
3407 }
3408#endif
3409 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3410 cmd->sc_data_direction = DMA_TO_DEVICE;
3411 cmd->cmd_len = 10;
3412 cmd->cmnd[0] = WRITE_BUFFER;
3413 cmd->cmnd[1] = 0x0a;
3414 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3415 cmd->request_bufflen = targ->dv_echo_size;
3416 cmd->underflow = cmd->request_bufflen;
3417 cmd->request_buffer = targ->dv_buffer;
3418}
3419
3420static void
3421ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3422 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3423{
3424
3425#ifdef AHD_DEBUG
3426 if (ahd_debug & AHD_SHOW_DV) {
3427 ahd_print_devinfo(ahd, devinfo);
3428 printf("Sending REB\n");
3429 }
3430#endif
3431 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3432 cmd->sc_data_direction = DMA_FROM_DEVICE;
3433 cmd->cmd_len = 10;
3434 cmd->cmnd[0] = READ_BUFFER;
3435 cmd->cmnd[1] = 0x0a;
3436 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3437 cmd->request_bufflen = targ->dv_echo_size;
3438 cmd->underflow = cmd->request_bufflen;
3439 cmd->request_buffer = targ->dv_buffer1;
3440}
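
Taken together, WEB and REB implement a write/read/compare echo-buffer test; a toy sketch of that flow, with a local array standing in for the target's echo buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_ECHO_SIZE 64

int main(void)
{
	uint8_t out[EX_ECHO_SIZE], in[EX_ECHO_SIZE], device[EX_ECHO_SIZE];
	int i;

	for (i = 0; i < EX_ECHO_SIZE; i++)	/* pattern to send (WEB) */
		out[i] = (uint8_t)i;
	memcpy(device, out, sizeof(device));	/* stands in for WRITE BUFFER */
	memcpy(in, device, sizeof(in));		/* stands in for READ BUFFER */

	if (memcmp(out, in, EX_ECHO_SIZE) != 0)
		printf("echo mismatch: fall back to a slower agreement\n");
	else
		printf("echo verified at the current transfer agreement\n");
	return 0;
}
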
3441
3442static void
3443ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3444 struct ahd_devinfo *devinfo,
3445 struct ahd_linux_target *targ)
3446{
3447 u_int le;
3448
3449 le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
3450
3451#ifdef AHD_DEBUG
3452 if (ahd_debug & AHD_SHOW_DV) {
3453 ahd_print_devinfo(ahd, devinfo);
3454 printf("Sending SU\n");
3455 }
3456#endif
3457 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3458 cmd->sc_data_direction = DMA_NONE;
3459 cmd->cmd_len = 6;
3460 cmd->cmnd[0] = START_STOP_UNIT;
3461 cmd->cmnd[4] = le | SSS_START;
3462}
3463
3464static int
3465ahd_linux_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3466{
3467 struct ahd_linux_target *targ;
3468 struct ahd_initiator_tinfo *tinfo;
3469 struct ahd_transinfo *goal;
3470 struct ahd_tmode_tstate *tstate;
3471 u_int width;
3472 u_int period;
3473 u_int offset;
3474 u_int ppr_options;
3475 u_int cur_speed;
3476 u_int wide_speed;
3477 u_int narrow_speed;
3478 u_int fallback_speed;
3479
3480#ifdef AHD_DEBUG
3481 if (ahd_debug & AHD_SHOW_DV) {
3482 ahd_print_devinfo(ahd, devinfo);
3483 printf("Trying to fallback\n");
3484 }
3485#endif
3486 targ = ahd->platform_data->targets[devinfo->target_offset];
3487 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
3488 devinfo->our_scsiid,
3489 devinfo->target, &tstate);
3490 goal = &tinfo->goal;
3491 width = goal->width;
3492 period = goal->period;
3493 offset = goal->offset;
3494 ppr_options = goal->ppr_options;
3495 if (offset == 0)
3496 period = AHD_ASYNC_XFER_PERIOD;
3497 if (targ->dv_next_narrow_period == 0)
3498 targ->dv_next_narrow_period = MAX(period, AHD_SYNCRATE_ULTRA2);
3499 if (targ->dv_next_wide_period == 0)
3500 targ->dv_next_wide_period = period;
3501 if (targ->dv_max_width == 0)
3502 targ->dv_max_width = width;
3503 if (targ->dv_max_ppr_options == 0)
3504 targ->dv_max_ppr_options = ppr_options;
3505 if (targ->dv_last_ppr_options == 0)
3506 targ->dv_last_ppr_options = ppr_options;
3507
3508 cur_speed = aic_calc_speed(width, period, offset, AHD_SYNCRATE_MIN);
3509 wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
3510 targ->dv_next_wide_period,
3511 MAX_OFFSET, AHD_SYNCRATE_MIN);
3512 narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
3513 targ->dv_next_narrow_period,
3514 MAX_OFFSET, AHD_SYNCRATE_MIN);
3515 fallback_speed = aic_calc_speed(width, period+1, offset,
3516 AHD_SYNCRATE_MIN);
3517#ifdef AHD_DEBUG
3518 if (ahd_debug & AHD_SHOW_DV) {
3519 printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
3520 "fallback_speed= %d\n", cur_speed, wide_speed,
3521 narrow_speed, fallback_speed);
3522 }
3523#endif
3524
3525 if (cur_speed > 160000) {
3526 /*
3527 * Paced/DT/IU_REQ only transfer speeds. All we
3528 * can do is fallback in terms of syncrate.
3529 */
3530 period++;
3531 } else if (cur_speed > 80000) {
3532 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3533 /*
3534 * Try without IU_REQ as it may be confusing
3535 * an expander.
3536 */
3537 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3538 } else {
3539 /*
3540 * Paced/DT only transfer speeds. All we
3541 * can do is fallback in terms of syncrate.
3542 */
3543 period++;
3544 ppr_options = targ->dv_max_ppr_options;
3545 }
3546 } else if (cur_speed > 3300) {
3547
 1347 		/*
 1348 		 * We allow the OS to queue 2 untagged transactions to
 1349 		 * us at any time even though we can only execute them
 1350 		 * serially on the controller/device. This should
 1351 		 * remove some latency.
 1352 		 */
 1353 		scsi_adjust_queue_depth(sdev,
 1354 				/*NON-TAGGED*/0,
 1355 				/*queue depth*/2);
 1356 		break;
3548 		/*
3549 		 * In this range we have the following
3550 		 * options ordered from highest to
3551 		 * lowest desirability:
3552 		 *
3553 		 * o Wide/DT
3554 		 * o Wide/non-DT
3555 		 * o Narrow at a potentially higher sync rate.
3556 		 *
3557 		 * All modes are tested with and without IU_REQ
3558 		 * set since using IUs may confuse an expander.
3559 		 */
3560 		if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3561
3562 			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3563 		} else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3564 /*
3565 * Try going non-DT.
3566 */
3567 ppr_options = targ->dv_max_ppr_options;
3568 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
3569 } else if (targ->dv_last_ppr_options != 0) {
3570 /*
3571 * Try without QAS or any other PPR options.
3572 * We may need a non-PPR message to work with
3573 * an expander. We look at the "last PPR options"
3574 * so we will perform this fallback even if the
3575 * target responded to our PPR negotiation with
3576 * no option bits set.
3577 */
3578 ppr_options = 0;
3579 } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
3580 /*
3581 * If the next narrow speed is greater than
3582 * the next wide speed, fallback to narrow.
3583 * Otherwise fallback to the next DT/Wide setting.
3584 * The narrow async speed will always be smaller
3585 * than the wide async speed, so handle this case
3586 * specifically.
3587 */
3588 ppr_options = targ->dv_max_ppr_options;
3589 if (narrow_speed > fallback_speed
3590 || period >= AHD_ASYNC_XFER_PERIOD) {
3591 targ->dv_next_wide_period = period+1;
3592 width = MSG_EXT_WDTR_BUS_8_BIT;
3593 period = targ->dv_next_narrow_period;
3594 } else {
3595 period++;
3596 }
3597 } else if ((ahd->features & AHD_WIDE) != 0
3598 && targ->dv_max_width != 0
3599 && wide_speed >= fallback_speed
3600 && (targ->dv_next_wide_period <= AHD_ASYNC_XFER_PERIOD
3601 || period >= AHD_ASYNC_XFER_PERIOD)) {
3602
3603 /*
3604 * We are narrow. Try falling back
3605 * to the next wide speed with
3606 * all supported ppr options set.
3607 */
3608 targ->dv_next_narrow_period = period+1;
3609 width = MSG_EXT_WDTR_BUS_16_BIT;
3610 period = targ->dv_next_wide_period;
3611 ppr_options = targ->dv_max_ppr_options;
3612 } else {
3613 /* Only narrow fallback is allowed. */
3614 period++;
3615 ppr_options = targ->dv_max_ppr_options;
3616 }
3617 } else {
3618 return (-1);
3619 }
3620 offset = MAX_OFFSET;
3621 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_PACED);
3622 ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, FALSE);
3623 if (period == 0) {
3624 period = 0;
3625 offset = 0;
3626 ppr_options = 0;
3627 if (width == MSG_EXT_WDTR_BUS_8_BIT)
3628 targ->dv_next_narrow_period = AHD_ASYNC_XFER_PERIOD;
3629 else
3630 targ->dv_next_wide_period = AHD_ASYNC_XFER_PERIOD;
3631 }
3632 ahd_set_syncrate(ahd, devinfo, period, offset,
3633 ppr_options, AHD_TRANS_GOAL, FALSE);
3634 targ->dv_last_ppr_options = ppr_options;
3635 return (0);
3636}
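
A heavily simplified sketch of the fallback ladder above, capturing only the ordering spelled out in the comments (drop IU_REQ, then DT, then the remaining PPR options, then width, then sync rate); the option bits are hypothetical and the speed comparisons the real code performs are ignored:

#include <stdio.h>

#define EX_PPR_IU	0x01	/* hypothetical option bits */
#define EX_PPR_DT	0x02
#define EX_PPR_OTHER	0x04

struct ex_goal {
	unsigned ppr_options;
	int wide;		/* 1 = 16-bit, 0 = 8-bit */
	unsigned period;	/* larger value = slower sync rate */
};

static void ex_fallback(struct ex_goal *g)
{
	if (g->ppr_options & EX_PPR_IU)
		g->ppr_options &= ~EX_PPR_IU;	/* first, stop using IUs */
	else if (g->ppr_options & EX_PPR_DT)
		g->ppr_options &= ~EX_PPR_DT;	/* then go non-DT */
	else if (g->ppr_options)
		g->ppr_options = 0;		/* then drop remaining PPR bits */
	else if (g->wide)
		g->wide = 0;			/* then narrow the bus */
	else
		g->period++;			/* finally, slow the sync rate */
}

int main(void)
{
	struct ex_goal g = { EX_PPR_IU | EX_PPR_DT | EX_PPR_OTHER, 1, 9 };
	int step;

	for (step = 0; step < 6; step++) {
		printf("step %d: ppr=0x%x wide=%d period=%u\n",
		       step, g.ppr_options, g.wide, g.period);
		ex_fallback(&g);
	}
	return 0;
}
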
3637
3638static void
3639ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
3640{
3641 struct ahd_softc *ahd;
3642 struct scb *scb;
3643 u_long flags;
3644
3645 ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
3646 ahd_lock(ahd, &flags);
3647
3648#ifdef AHD_DEBUG
3649 if (ahd_debug & AHD_SHOW_DV) {
3650 printf("%s: Timeout while doing DV command %x.\n",
3651 ahd_name(ahd), cmd->cmnd[0]);
3652 ahd_dump_card_state(ahd);
3653 }
3654#endif
3655
3656 /*
3657 * Guard against "done race". No action is
3658 * required if we just completed.
3659 */
3660 if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
3661 ahd_unlock(ahd, &flags);
3662 return;
3663 } 1357 }
3664
3665 /*
3666 * Command has not completed. Mark this
3667 * SCB as having failing status prior to
3668 * resetting the bus, so we get the correct
3669 * error code.
3670 */
3671 if ((scb->flags & SCB_SENSE) != 0)
3672 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
3673 else
3674 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
3675 ahd_reset_channel(ahd, cmd->device->channel + 'A', /*initiate*/TRUE);
3676
3677 /*
3678 * Add a minimal bus settle delay for devices that are slow to
3679 * respond after bus resets.
3680 */
3681 ahd_freeze_simq(ahd);
3682 init_timer(&ahd->platform_data->reset_timer);
3683 ahd->platform_data->reset_timer.data = (u_long)ahd;
3684 ahd->platform_data->reset_timer.expires = jiffies + HZ / 2;
3685 ahd->platform_data->reset_timer.function =
3686 (ahd_linux_callback_t *)ahd_release_simq;
3687 add_timer(&ahd->platform_data->reset_timer);
3688 if (ahd_linux_next_device_to_run(ahd) != NULL)
3689 ahd_schedule_runq(ahd);
3690 ahd_linux_run_complete_queue(ahd);
3691 ahd_unlock(ahd, &flags);
3692}
3693
3694static void
3695ahd_linux_dv_complete(struct scsi_cmnd *cmd)
3696{
3697 struct ahd_softc *ahd;
3698
3699 ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
3700
3701 /* Delete the DV timer before it goes off! */
3702 scsi_delete_timer(cmd);
3703
3704#ifdef AHD_DEBUG
3705 if (ahd_debug & AHD_SHOW_DV)
3706 printf("%s:%c:%d: Command completed, status= 0x%x\n",
3707 ahd_name(ahd), cmd->device->channel, cmd->device->id,
3708 cmd->result);
3709#endif
3710
3711 /* Wake up the state machine */
3712 up(&ahd->platform_data->dv_cmd_sem);
3713} 1358}
3714 1359
 1360 int
 1361 ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
 1362 			int lun, u_int tag, role_t role, uint32_t status)
 1363 {
 1364 	return 0;
3715 static void
3716 ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ)
3717 {
3718 	uint16_t b;
3719 u_int i;
3720 u_int j;
3721
3722 if (targ->dv_buffer != NULL)
3723 free(targ->dv_buffer, M_DEVBUF);
3724 targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3725 if (targ->dv_buffer1 != NULL)
3726 free(targ->dv_buffer1, M_DEVBUF);
3727 targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3728
3729 i = 0;
3730
3731 b = 0x0001;
3732 for (j = 0 ; i < targ->dv_echo_size; j++) {
3733 if (j < 32) {
3734 /*
3735 * 32 bytes of sequential numbers.
3736 */
3737 targ->dv_buffer[i++] = j & 0xff;
3738 } else if (j < 48) {
3739 /*
3740 * 32bytes of repeating 0x0000, 0xffff.
3741 * 16 bytes of repeating 0x0000, 0xffff.
3742 targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
3743 } else if (j < 64) {
3744 /*
3745 * 32bytes of repeating 0x5555, 0xaaaa.
3746 * 16 bytes of repeating 0x5555, 0xaaaa.
3747 targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
3748 } else {
3749 /*
3750 * Remaining buffer is filled with a repeating
3751 * pattern of:
3752 *
3753 * 0xffff
3754 * ~0x0001 << shifted once in each loop.
3755 */
3756 if (j & 0x02) {
3757 if (j & 0x01) {
3758 targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
3759 b <<= 1;
3760 if (b == 0x0000)
3761 b = 0x0001;
3762 } else {
3763 targ->dv_buffer[i++] = (~b & 0xff);
3764 }
3765 } else {
3766 targ->dv_buffer[i++] = 0xff;
3767 }
3768 }
3769 }
3770} 1365}
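
The same pattern generator, lifted into a standalone user-space program so the regions (counting bytes, 0x0000/0xffff words, 0x5555/0xaaaa words, walking-bit tail) can be dumped and inspected; the 128-byte size is arbitrary, the driver uses the echo size reported by the device:

#include <stdint.h>
#include <stdio.h>

#define EX_ECHO_SIZE 128

int main(void)
{
	uint8_t buf[EX_ECHO_SIZE];
	uint16_t b = 0x0001;
	unsigned i = 0, j;

	for (j = 0; i < EX_ECHO_SIZE; j++) {
		if (j < 32) {
			buf[i++] = j & 0xff;			/* counting bytes */
		} else if (j < 48) {
			buf[i++] = (j & 0x02) ? 0xff : 0x00;	/* 0x0000/0xffff words */
		} else if (j < 64) {
			buf[i++] = (j & 0x02) ? 0xaa : 0x55;	/* 0x5555/0xaaaa words */
		} else if (j & 0x02) {
			if (j & 0x01) {
				buf[i++] = ~(b >> 8) & 0xff;	/* walking-bit tail */
				b <<= 1;
				if (b == 0x0000)
					b = 0x0001;
			} else {
				buf[i++] = ~b & 0xff;
			}
		} else {
			buf[i++] = 0xff;
		}
	}

	for (i = 0; i < EX_ECHO_SIZE; i++)
		printf("%02x%c", (unsigned)buf[i], (i & 0xf) == 0xf ? '\n' : ' ');
	return 0;
}
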
3771 1366
3772static u_int 1367static u_int
@@ -3800,100 +1395,23 @@ ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3800 return (tags); 1395 return (tags);
3801} 1396}
3802 1397
3803static u_int
3804ahd_linux_user_dv_setting(struct ahd_softc *ahd)
3805{
3806 static int warned_user;
3807 int dv;
3808
3809 if (ahd->unit >= NUM_ELEMENTS(aic79xx_dv_settings)) {
3810
3811 if (warned_user == 0) {
3812 printf(KERN_WARNING
3813"aic79xx: WARNING: Insufficient dv settings instances\n"
3814"aic79xx: for installed controllers. Using defaults\n"
3815"aic79xx: Please update the aic79xx_dv_settings array in"
3816"aic79xx: the aic79xx_osm.c source file.\n");
3817 warned_user++;
3818 }
3819 dv = -1;
3820 } else {
3821
3822 dv = aic79xx_dv_settings[ahd->unit];
3823 }
3824
3825 if (dv < 0) {
3826 /*
3827 * Apply the default.
3828 */
3829 dv = 1;
3830 if (ahd->seep_config != 0)
3831 dv = (ahd->seep_config->bios_control & CFENABLEDV);
3832 }
3833 return (dv);
3834}
3835
3836static void
3837ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd)
3838{
3839 static int warned_user;
3840 u_int rd_strm_mask;
3841 u_int target_id;
3842
3843 /*
3844 * If we have specific read streaming info for this controller,
3845 * apply it. Otherwise use the defaults.
3846 */
3847 if (ahd->unit >= NUM_ELEMENTS(aic79xx_rd_strm_info)) {
3848
3849 if (warned_user == 0) {
3850
3851 printf(KERN_WARNING
3852"aic79xx: WARNING: Insufficient rd_strm instances\n"
3853"aic79xx: for installed controllers. Using defaults\n"
3854"aic79xx: Please update the aic79xx_rd_strm_info array\n"
3855"aic79xx: in the aic79xx_osm.c source file.\n");
3856 warned_user++;
3857 }
3858 rd_strm_mask = AIC79XX_CONFIGED_RD_STRM;
3859 } else {
3860
3861 rd_strm_mask = aic79xx_rd_strm_info[ahd->unit];
3862 }
3863 for (target_id = 0; target_id < 16; target_id++) {
3864 struct ahd_devinfo devinfo;
3865 struct ahd_initiator_tinfo *tinfo;
3866 struct ahd_tmode_tstate *tstate;
3867
3868 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
3869 target_id, &tstate);
3870 ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
3871 CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
3872 tinfo->user.ppr_options &= ~MSG_EXT_PPR_RD_STRM;
3873 if ((rd_strm_mask & devinfo.target_mask) != 0)
3874 tinfo->user.ppr_options |= MSG_EXT_PPR_RD_STRM;
3875 }
3876}
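
A small sketch of the per-target bitmask idiom used above, assuming devinfo.target_mask is simply 1 << target id; the EX_* mask value is invented:

#include <stdio.h>

/* Hypothetical per-controller mask: bit N set enables the option for
 * target N, mirroring how aic79xx_rd_strm_info[] is consumed above. */
#define EX_RD_STRM_MASK 0x0005u		/* enable for targets 0 and 2 */

int main(void)
{
	unsigned target_id;

	for (target_id = 0; target_id < 16; target_id++) {
		unsigned target_mask = 1u << target_id;	/* assumed encoding */

		if (EX_RD_STRM_MASK & target_mask)
			printf("target %u: request MSG_EXT_PPR_RD_STRM\n",
			       target_id);
	}
	return 0;
}
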
3877
3878/* 1398/*
3879 * Determines the queue depth for a given device. 1399 * Determines the queue depth for a given device.
3880 */ 1400 */
3881static void 1401static void
3882ahd_linux_device_queue_depth(struct ahd_softc *ahd, 1402ahd_linux_device_queue_depth(struct scsi_device *sdev)
3883 struct ahd_linux_device *dev)
3884{ 1403{
3885 struct ahd_devinfo devinfo; 1404 struct ahd_devinfo devinfo;
3886 u_int tags; 1405 u_int tags;
1406 struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata);
3887 1407
3888 ahd_compile_devinfo(&devinfo, 1408 ahd_compile_devinfo(&devinfo,
3889 ahd->our_id, 1409 ahd->our_id,
3890 dev->target->target, dev->lun, 1410 sdev->sdev_target->id, sdev->lun,
3891 dev->target->channel == 0 ? 'A' : 'B', 1411 sdev->sdev_target->channel == 0 ? 'A' : 'B',
3892 ROLE_INITIATOR); 1412 ROLE_INITIATOR);
3893 tags = ahd_linux_user_tagdepth(ahd, &devinfo); 1413 tags = ahd_linux_user_tagdepth(ahd, &devinfo);
3894 if (tags != 0 1414 if (tags != 0 && sdev->tagged_supported != 0) {
3895 && dev->scsi_device != NULL
3896 && dev->scsi_device->tagged_supported != 0) {
3897 1415
3898 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED); 1416 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
3899 ahd_print_devinfo(ahd, &devinfo); 1417 ahd_print_devinfo(ahd, &devinfo);
@@ -3903,11 +1421,10 @@ ahd_linux_device_queue_depth(struct ahd_softc *ahd,
3903 } 1421 }
3904} 1422}
3905 1423
3906static void 1424static int
3907ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev) 1425ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1426 struct scsi_cmnd *cmd)
3908{ 1427{
3909 struct ahd_cmd *acmd;
3910 struct scsi_cmnd *cmd;
3911 struct scb *scb; 1428 struct scb *scb;
3912 struct hardware_scb *hscb; 1429 struct hardware_scb *hscb;
3913 struct ahd_initiator_tinfo *tinfo; 1430 struct ahd_initiator_tinfo *tinfo;
@@ -3915,157 +1432,122 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
3915 u_int col_idx; 1432 u_int col_idx;
3916 uint16_t mask; 1433 uint16_t mask;
3917 1434
3918 if ((dev->flags & AHD_DEV_ON_RUN_LIST) != 0) 1435 /*
3919 panic("running device on run list"); 1436 * Get an scb to use.
3920 1437 */
3921 while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL 1438 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
3922 && dev->openings > 0 && dev->qfrozen == 0) { 1439 cmd->device->id, &tstate);
3923 1440 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
3924 /* 1441 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3925 * Schedule us to run later. The only reason we are not 1442 col_idx = AHD_NEVER_COL_IDX;
3926 * running is because the whole controller Q is frozen. 1443 } else {
3927 */ 1444 col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
3928 if (ahd->platform_data->qfrozen != 0 1445 cmd->device->lun);
3929 && AHD_DV_SIMQ_FROZEN(ahd) == 0) { 1446 }
3930 1447 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
3931 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, 1448 ahd->flags |= AHD_RESOURCE_SHORTAGE;
3932 dev, links); 1449 return SCSI_MLQUEUE_HOST_BUSY;
3933 dev->flags |= AHD_DEV_ON_RUN_LIST; 1450 }
3934 return;
3935 }
3936
3937 cmd = &acmd_scsi_cmd(acmd);
3938 1451
3939 /* 1452 scb->io_ctx = cmd;
3940 * Get an scb to use. 1453 scb->platform_data->dev = dev;
3941 */ 1454 hscb = scb->hscb;
3942 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 1455 cmd->host_scribble = (char *)scb;
3943 cmd->device->id, &tstate);
3944 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
3945 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3946 col_idx = AHD_NEVER_COL_IDX;
3947 } else {
3948 col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
3949 cmd->device->lun);
3950 }
3951 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
3952 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
3953 dev, links);
3954 dev->flags |= AHD_DEV_ON_RUN_LIST;
3955 ahd->flags |= AHD_RESOURCE_SHORTAGE;
3956 return;
3957 }
3958 TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
3959 scb->io_ctx = cmd;
3960 scb->platform_data->dev = dev;
3961 hscb = scb->hscb;
3962 cmd->host_scribble = (char *)scb;
3963 1456
3964 /* 1457 /*
3965 * Fill out basics of the HSCB. 1458 * Fill out basics of the HSCB.
3966 */ 1459 */
3967 hscb->control = 0; 1460 hscb->control = 0;
3968 hscb->scsiid = BUILD_SCSIID(ahd, cmd); 1461 hscb->scsiid = BUILD_SCSIID(ahd, cmd);
3969 hscb->lun = cmd->device->lun; 1462 hscb->lun = cmd->device->lun;
3970 scb->hscb->task_management = 0; 1463 scb->hscb->task_management = 0;
3971 mask = SCB_GET_TARGET_MASK(ahd, scb); 1464 mask = SCB_GET_TARGET_MASK(ahd, scb);
3972 1465
3973 if ((ahd->user_discenable & mask) != 0) 1466 if ((ahd->user_discenable & mask) != 0)
3974 hscb->control |= DISCENB; 1467 hscb->control |= DISCENB;
3975 1468
3976 if (AHD_DV_CMD(cmd) != 0) 1469 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
3977 scb->flags |= SCB_SILENT; 1470 scb->flags |= SCB_PACKETIZED;
3978 1471
3979 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) 1472 if ((tstate->auto_negotiate & mask) != 0) {
3980 scb->flags |= SCB_PACKETIZED; 1473 scb->flags |= SCB_AUTO_NEGOTIATE;
1474 scb->hscb->control |= MK_MESSAGE;
1475 }
3981 1476
3982 if ((tstate->auto_negotiate & mask) != 0) { 1477 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
3983 scb->flags |= SCB_AUTO_NEGOTIATE; 1478 int msg_bytes;
3984 scb->hscb->control |= MK_MESSAGE; 1479 uint8_t tag_msgs[2];
3985 }
3986 1480
3987 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) { 1481 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
3988#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1482 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
3989 int msg_bytes; 1483 hscb->control |= tag_msgs[0];
3990 uint8_t tag_msgs[2]; 1484 if (tag_msgs[0] == MSG_ORDERED_TASK)
3991
3992 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
3993 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
3994 hscb->control |= tag_msgs[0];
3995 if (tag_msgs[0] == MSG_ORDERED_TASK)
3996 dev->commands_since_idle_or_otag = 0;
3997 } else
3998#endif
3999 if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
4000 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
4001 hscb->control |= MSG_ORDERED_TASK;
4002 dev->commands_since_idle_or_otag = 0; 1485 dev->commands_since_idle_or_otag = 0;
4003 } else { 1486 } else
4004 hscb->control |= MSG_SIMPLE_TASK; 1487 if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
4005 } 1488 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
1489 hscb->control |= MSG_ORDERED_TASK;
1490 dev->commands_since_idle_or_otag = 0;
1491 } else {
1492 hscb->control |= MSG_SIMPLE_TASK;
4006 } 1493 }
1494 }
4007 1495
4008 hscb->cdb_len = cmd->cmd_len; 1496 hscb->cdb_len = cmd->cmd_len;
4009 memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len); 1497 memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);
4010 1498
4011 scb->sg_count = 0; 1499 scb->platform_data->xfer_len = 0;
4012 ahd_set_residual(scb, 0); 1500 ahd_set_residual(scb, 0);
4013 ahd_set_sense_residual(scb, 0); 1501 ahd_set_sense_residual(scb, 0);
4014 if (cmd->use_sg != 0) { 1502 scb->sg_count = 0;
4015 void *sg; 1503 if (cmd->use_sg != 0) {
4016 struct scatterlist *cur_seg; 1504 void *sg;
4017 u_int nseg; 1505 struct scatterlist *cur_seg;
4018 int dir; 1506 u_int nseg;
4019 1507 int dir;
4020 cur_seg = (struct scatterlist *)cmd->request_buffer; 1508
4021 dir = cmd->sc_data_direction; 1509 cur_seg = (struct scatterlist *)cmd->request_buffer;
4022 nseg = pci_map_sg(ahd->dev_softc, cur_seg, 1510 dir = cmd->sc_data_direction;
4023 cmd->use_sg, dir); 1511 nseg = pci_map_sg(ahd->dev_softc, cur_seg,
4024 scb->platform_data->xfer_len = 0; 1512 cmd->use_sg, dir);
4025 for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) { 1513 scb->platform_data->xfer_len = 0;
4026 dma_addr_t addr; 1514 for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
4027 bus_size_t len;
4028
4029 addr = sg_dma_address(cur_seg);
4030 len = sg_dma_len(cur_seg);
4031 scb->platform_data->xfer_len += len;
4032 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
4033 /*last*/nseg == 1);
4034 }
4035 } else if (cmd->request_bufflen != 0) {
4036 void *sg;
4037 dma_addr_t addr; 1515 dma_addr_t addr;
4038 int dir; 1516 bus_size_t len;
4039
4040 sg = scb->sg_list;
4041 dir = cmd->sc_data_direction;
4042 addr = pci_map_single(ahd->dev_softc,
4043 cmd->request_buffer,
4044 cmd->request_bufflen, dir);
4045 scb->platform_data->xfer_len = cmd->request_bufflen;
4046 scb->platform_data->buf_busaddr = addr;
4047 sg = ahd_sg_setup(ahd, scb, sg, addr,
4048 cmd->request_bufflen, /*last*/TRUE);
4049 }
4050 1517
4051 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); 1518 addr = sg_dma_address(cur_seg);
4052 dev->openings--; 1519 len = sg_dma_len(cur_seg);
4053 dev->active++; 1520 scb->platform_data->xfer_len += len;
4054 dev->commands_issued++; 1521 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
4055 1522 /*last*/nseg == 1);
4056 /* Update the error counting bucket and dump if needed */
4057 if (dev->target->cmds_since_error) {
4058 dev->target->cmds_since_error++;
4059 if (dev->target->cmds_since_error >
4060 AHD_LINUX_ERR_THRESH)
4061 dev->target->cmds_since_error = 0;
4062 } 1523 }
1524 } else if (cmd->request_bufflen != 0) {
1525 void *sg;
1526 dma_addr_t addr;
1527 int dir;
4063 1528
4064 if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0) 1529 sg = scb->sg_list;
4065 dev->commands_since_idle_or_otag++; 1530 dir = cmd->sc_data_direction;
4066 scb->flags |= SCB_ACTIVE; 1531 addr = pci_map_single(ahd->dev_softc,
4067 ahd_queue_scb(ahd, scb); 1532 cmd->request_buffer,
1533 cmd->request_bufflen, dir);
1534 scb->platform_data->xfer_len = cmd->request_bufflen;
1535 scb->platform_data->buf_busaddr = addr;
1536 sg = ahd_sg_setup(ahd, scb, sg, addr,
1537 cmd->request_bufflen, /*last*/TRUE);
4068 } 1538 }
1539
1540 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1541 dev->openings--;
1542 dev->active++;
1543 dev->commands_issued++;
1544
1545 if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
1546 dev->commands_since_idle_or_otag++;
1547 scb->flags |= SCB_ACTIVE;
1548 ahd_queue_scb(ahd, scb);
1549
1550 return 0;
4069} 1551}
4070 1552
4071/* 1553/*
@@ -4081,9 +1563,6 @@ ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
4081 ahd = (struct ahd_softc *) dev_id; 1563 ahd = (struct ahd_softc *) dev_id;
4082 ahd_lock(ahd, &flags); 1564 ahd_lock(ahd, &flags);
4083 ours = ahd_intr(ahd); 1565 ours = ahd_intr(ahd);
4084 if (ahd_linux_next_device_to_run(ahd) != NULL)
4085 ahd_schedule_runq(ahd);
4086 ahd_linux_run_complete_queue(ahd);
4087 ahd_unlock(ahd, &flags); 1566 ahd_unlock(ahd, &flags);
4088 return IRQ_RETVAL(ours); 1567 return IRQ_RETVAL(ours);
4089} 1568}
@@ -4092,111 +1571,6 @@ void
4092ahd_platform_flushwork(struct ahd_softc *ahd) 1571ahd_platform_flushwork(struct ahd_softc *ahd)
4093{ 1572{
4094 1573
4095 while (ahd_linux_run_complete_queue(ahd) != NULL)
4096 ;
4097}
4098
4099static struct ahd_linux_target*
4100ahd_linux_alloc_target(struct ahd_softc *ahd, u_int channel, u_int target)
4101{
4102 struct ahd_linux_target *targ;
4103
4104 targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
4105 if (targ == NULL)
4106 return (NULL);
4107 memset(targ, 0, sizeof(*targ));
4108 targ->channel = channel;
4109 targ->target = target;
4110 targ->ahd = ahd;
4111 targ->flags = AHD_DV_REQUIRED;
4112 ahd->platform_data->targets[target] = targ;
4113 return (targ);
4114}
4115
4116static void
4117ahd_linux_free_target(struct ahd_softc *ahd, struct ahd_linux_target *targ)
4118{
4119 struct ahd_devinfo devinfo;
4120 struct ahd_initiator_tinfo *tinfo;
4121 struct ahd_tmode_tstate *tstate;
4122 u_int our_id;
4123 u_int target_offset;
4124 char channel;
4125
4126 /*
4127 * Force a negotiation to async/narrow on any
4128 * future command to this device unless a bus
4129 * reset occurs between now and that command.
4130 */
4131 channel = 'A' + targ->channel;
4132 our_id = ahd->our_id;
4133 target_offset = targ->target;
4134 tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
4135 targ->target, &tstate);
4136 ahd_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
4137 channel, ROLE_INITIATOR);
4138 ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
4139 AHD_TRANS_GOAL, /*paused*/FALSE);
4140 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
4141 AHD_TRANS_GOAL, /*paused*/FALSE);
4142 ahd_update_neg_request(ahd, &devinfo, tstate, tinfo, AHD_NEG_ALWAYS);
4143 ahd->platform_data->targets[target_offset] = NULL;
4144 if (targ->inq_data != NULL)
4145 free(targ->inq_data, M_DEVBUF);
4146 if (targ->dv_buffer != NULL)
4147 free(targ->dv_buffer, M_DEVBUF);
4148 if (targ->dv_buffer1 != NULL)
4149 free(targ->dv_buffer1, M_DEVBUF);
4150 free(targ, M_DEVBUF);
4151}
4152
4153static struct ahd_linux_device*
4154ahd_linux_alloc_device(struct ahd_softc *ahd,
4155 struct ahd_linux_target *targ, u_int lun)
4156{
4157 struct ahd_linux_device *dev;
4158
4159 dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT);
4160 if (dev == NULL)
4161 return (NULL);
4162 memset(dev, 0, sizeof(*dev));
4163 init_timer(&dev->timer);
4164 TAILQ_INIT(&dev->busyq);
4165 dev->flags = AHD_DEV_UNCONFIGURED;
4166 dev->lun = lun;
4167 dev->target = targ;
4168
4169 /*
4170 * We start out life using untagged
4171 * transactions of which we allow one.
4172 */
4173 dev->openings = 1;
4174
4175 /*
4176 * Set maxtags to 0. This will be changed if we
4177 * later determine that we are dealing with
4178 * a tagged queuing capable device.
4179 */
4180 dev->maxtags = 0;
4181
4182 targ->refcount++;
4183 targ->devices[lun] = dev;
4184 return (dev);
4185}
4186
4187static void
4188ahd_linux_free_device(struct ahd_softc *ahd, struct ahd_linux_device *dev)
4189{
4190 struct ahd_linux_target *targ;
4191
4192 del_timer(&dev->timer);
4193 targ = dev->target;
4194 targ->devices[dev->lun] = NULL;
4195 free(dev, M_DEVBUF);
4196 targ->refcount--;
4197 if (targ->refcount == 0
4198 && (targ->flags & AHD_DV_REQUIRED) == 0)
4199 ahd_linux_free_target(ahd, targ);
4200} 1574}
4201 1575
4202void 1576void
@@ -4207,10 +1581,14 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
4207 case AC_TRANSFER_NEG: 1581 case AC_TRANSFER_NEG:
4208 { 1582 {
4209 char buf[80]; 1583 char buf[80];
1584 struct scsi_target *starget;
4210 struct ahd_linux_target *targ; 1585 struct ahd_linux_target *targ;
4211 struct info_str info; 1586 struct info_str info;
4212 struct ahd_initiator_tinfo *tinfo; 1587 struct ahd_initiator_tinfo *tinfo;
4213 struct ahd_tmode_tstate *tstate; 1588 struct ahd_tmode_tstate *tstate;
1589 unsigned int target_ppr_options;
1590
1591 BUG_ON(target == CAM_TARGET_WILDCARD);
4214 1592
4215 info.buffer = buf; 1593 info.buffer = buf;
4216 info.length = sizeof(buf); 1594 info.length = sizeof(buf);
@@ -4234,58 +1612,47 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
4234 * Don't bother reporting results that 1612 * Don't bother reporting results that
4235 * are identical to those last reported. 1613 * are identical to those last reported.
4236 */ 1614 */
4237 targ = ahd->platform_data->targets[target]; 1615 starget = ahd->platform_data->starget[target];
4238 if (targ == NULL) 1616 if (starget == NULL)
4239 break; 1617 break;
4240 if (tinfo->curr.period == targ->last_tinfo.period 1618 targ = scsi_transport_target_data(starget);
4241 && tinfo->curr.width == targ->last_tinfo.width 1619
4242 && tinfo->curr.offset == targ->last_tinfo.offset 1620 target_ppr_options =
4243 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options) 1621 (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
1622 + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
1623 + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0)
1624 + (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0)
1625 + (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0)
1626 + (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0)
1627 + (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0)
1628 + (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0);
1629
1630 if (tinfo->curr.period == spi_period(starget)
1631 && tinfo->curr.width == spi_width(starget)
1632 && tinfo->curr.offset == spi_offset(starget)
1633 && tinfo->curr.ppr_options == target_ppr_options)
4244 if (bootverbose == 0) 1634 if (bootverbose == 0)
4245 break; 1635 break;
4246 1636
4247 targ->last_tinfo.period = tinfo->curr.period; 1637 spi_period(starget) = tinfo->curr.period;
4248 targ->last_tinfo.width = tinfo->curr.width; 1638 spi_width(starget) = tinfo->curr.width;
4249 targ->last_tinfo.offset = tinfo->curr.offset; 1639 spi_offset(starget) = tinfo->curr.offset;
4250 targ->last_tinfo.ppr_options = tinfo->curr.ppr_options; 1640 spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
4251 1641 spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
4252 printf("(%s:%c:", ahd_name(ahd), channel); 1642 spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
4253 if (target == CAM_TARGET_WILDCARD) 1643 spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
4254 printf("*): "); 1644 spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
4255 else 1645 spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0;
4256 printf("%d): ", target); 1646 spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
4257 ahd_format_transinfo(&info, &tinfo->curr); 1647 spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
4258 if (info.pos < info.length) 1648 spi_display_xfer_agreement(starget);
4259 *info.buffer = '\0';
4260 else
4261 buf[info.length - 1] = '\0';
4262 printf("%s", buf);
4263 break; 1649 break;
4264 } 1650 }
4265 case AC_SENT_BDR: 1651 case AC_SENT_BDR:
4266 { 1652 {
4267#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
4268 WARN_ON(lun != CAM_LUN_WILDCARD); 1653 WARN_ON(lun != CAM_LUN_WILDCARD);
4269 scsi_report_device_reset(ahd->platform_data->host, 1654 scsi_report_device_reset(ahd->platform_data->host,
4270 channel - 'A', target); 1655 channel - 'A', target);
4271#else
4272 Scsi_Device *scsi_dev;
4273
4274 /*
4275 * Find the SCSI device associated with this
4276 * request and indicate that a UA is expected.
4277 */
4278 for (scsi_dev = ahd->platform_data->host->host_queue;
4279 scsi_dev != NULL; scsi_dev = scsi_dev->next) {
4280 if (channel - 'A' == scsi_dev->channel
4281 && target == scsi_dev->id
4282 && (lun == CAM_LUN_WILDCARD
4283 || lun == scsi_dev->lun)) {
4284 scsi_dev->was_reset = 1;
4285 scsi_dev->expecting_cc_ua = 1;
4286 }
4287 }
4288#endif
4289 break; 1656 break;
4290 } 1657 }
4291 case AC_BUS_RESET: 1658 case AC_BUS_RESET:
@@ -4305,7 +1672,7 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
4305void 1672void
4306ahd_done(struct ahd_softc *ahd, struct scb *scb) 1673ahd_done(struct ahd_softc *ahd, struct scb *scb)
4307{ 1674{
4308 Scsi_Cmnd *cmd; 1675 struct scsi_cmnd *cmd;
4309 struct ahd_linux_device *dev; 1676 struct ahd_linux_device *dev;
4310 1677
4311 if ((scb->flags & SCB_ACTIVE) == 0) { 1678 if ((scb->flags & SCB_ACTIVE) == 0) {
@@ -4373,19 +1740,8 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
4373 ahd_set_transaction_status(scb, CAM_REQ_CMP); 1740 ahd_set_transaction_status(scb, CAM_REQ_CMP);
4374 } 1741 }
4375 } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { 1742 } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
4376 ahd_linux_handle_scsi_status(ahd, dev, scb); 1743 ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
4377 } else if (ahd_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
4378 dev->flags |= AHD_DEV_UNCONFIGURED;
4379 if (AHD_DV_CMD(cmd) == FALSE)
4380 dev->target->flags &= ~AHD_DV_REQUIRED;
4381 } 1744 }
4382 /*
4383 * Start DV for devices that require it assuming the first command
4384 * sent does not result in a selection timeout.
4385 */
4386 if (ahd_get_transaction_status(scb) != CAM_SEL_TIMEOUT
4387 && (dev->target->flags & AHD_DV_REQUIRED) != 0)
4388 ahd_linux_start_dv(ahd);
4389 1745
4390 if (dev->openings == 1 1746 if (dev->openings == 1
4391 && ahd_get_transaction_status(scb) == CAM_REQ_CMP 1747 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
@@ -4406,47 +1762,32 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
4406 if (dev->active == 0) 1762 if (dev->active == 0)
4407 dev->commands_since_idle_or_otag = 0; 1763 dev->commands_since_idle_or_otag = 0;
4408 1764
4409 if (TAILQ_EMPTY(&dev->busyq)) {
4410 if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
4411 && dev->active == 0
4412 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
4413 ahd_linux_free_device(ahd, dev);
4414 } else if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
4415 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
4416 dev->flags |= AHD_DEV_ON_RUN_LIST;
4417 }
4418
4419 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 1765 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
4420 printf("Recovery SCB completes\n"); 1766 printf("Recovery SCB completes\n");
4421 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT 1767 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
4422 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) 1768 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
4423 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); 1769 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
4424 if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) { 1770 if ((ahd->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
4425 scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM; 1771 ahd->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
4426 up(&ahd->platform_data->eh_sem); 1772 up(&ahd->platform_data->eh_sem);
4427 } 1773 }
4428 } 1774 }
4429 1775
4430 ahd_free_scb(ahd, scb); 1776 ahd_free_scb(ahd, scb);
4431 ahd_linux_queue_cmd_complete(ahd, cmd); 1777 ahd_linux_queue_cmd_complete(ahd, cmd);
4432
4433 if ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_EMPTY) != 0
4434 && LIST_FIRST(&ahd->pending_scbs) == NULL) {
4435 ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_EMPTY;
4436 up(&ahd->platform_data->dv_sem);
4437 }
4438} 1778}
4439 1779
4440static void 1780static void
4441ahd_linux_handle_scsi_status(struct ahd_softc *ahd, 1781ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4442 struct ahd_linux_device *dev, struct scb *scb) 1782 struct scsi_device *sdev, struct scb *scb)
4443{ 1783{
4444 struct ahd_devinfo devinfo; 1784 struct ahd_devinfo devinfo;
1785 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
4445 1786
4446 ahd_compile_devinfo(&devinfo, 1787 ahd_compile_devinfo(&devinfo,
4447 ahd->our_id, 1788 ahd->our_id,
4448 dev->target->target, dev->lun, 1789 sdev->sdev_target->id, sdev->lun,
4449 dev->target->channel == 0 ? 'A' : 'B', 1790 sdev->sdev_target->channel == 0 ? 'A' : 'B',
4450 ROLE_INITIATOR); 1791 ROLE_INITIATOR);
4451 1792
4452 /* 1793 /*
@@ -4465,7 +1806,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4465 case SCSI_STATUS_CHECK_COND: 1806 case SCSI_STATUS_CHECK_COND:
4466 case SCSI_STATUS_CMD_TERMINATED: 1807 case SCSI_STATUS_CMD_TERMINATED:
4467 { 1808 {
4468 Scsi_Cmnd *cmd; 1809 struct scsi_cmnd *cmd;
4469 1810
4470 /* 1811 /*
4471 * Copy sense information to the OS's cmd 1812 * Copy sense information to the OS's cmd
@@ -4518,7 +1859,6 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4518 break; 1859 break;
4519 } 1860 }
4520 case SCSI_STATUS_QUEUE_FULL: 1861 case SCSI_STATUS_QUEUE_FULL:
4521 {
4522 /* 1862 /*
4523 * By the time the core driver has returned this 1863 * By the time the core driver has returned this
4524 * command, all other commands that were queued 1864 * command, all other commands that were queued
@@ -4579,98 +1919,23 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4579 (dev->flags & AHD_DEV_Q_BASIC) 1919 (dev->flags & AHD_DEV_Q_BASIC)
4580 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); 1920 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
4581 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY); 1921 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
4582 /* FALLTHROUGH */
4583 }
4584 case SCSI_STATUS_BUSY:
4585 /*
4586 * Set a short timer to defer sending commands for
4587 * a bit since Linux will not delay in this case.
4588 */
4589 if ((dev->flags & AHD_DEV_TIMER_ACTIVE) != 0) {
4590 printf("%s:%c:%d: Device Timer still active during "
4591 "busy processing\n", ahd_name(ahd),
4592 dev->target->channel, dev->target->target);
4593 break;
4594 }
4595 dev->flags |= AHD_DEV_TIMER_ACTIVE;
4596 dev->qfrozen++;
4597 init_timer(&dev->timer);
4598 dev->timer.data = (u_long)dev;
4599 dev->timer.expires = jiffies + (HZ/2);
4600 dev->timer.function = ahd_linux_dev_timed_unfreeze;
4601 add_timer(&dev->timer);
4602 break;
4603 } 1922 }
4604} 1923}
4605 1924
4606static void 1925static void
4607ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, Scsi_Cmnd *cmd) 1926ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
4608{ 1927{
4609 /* 1928 /*
4610 * Typically, the complete queue has very few entries
4611 * queued to it before the queue is emptied by
4612 * ahd_linux_run_complete_queue, so sorting the entries
4613 * by generation number should be inexpensive.
4614 * We perform the sort so that commands that complete
4615 * with an error are returned in the order originally
4616 * queued to the controller so that any subsequent retries
4617 * are performed in order. The underlying ahd routines do
4618 * not guarantee the order that aborted commands will be
4619 * returned to us.
4620 */
4621 struct ahd_completeq *completeq;
4622 struct ahd_cmd *list_cmd;
4623 struct ahd_cmd *acmd;
4624
4625 /*
4626 * Map CAM error codes into Linux Error codes. We 1929 * Map CAM error codes into Linux Error codes. We
4627 * avoid the conversion so that the DV code has the 1930 * avoid the conversion so that the DV code has the
4628 * full error information available when making 1931 * full error information available when making
4629 * state change decisions. 1932 * state change decisions.
4630 */ 1933 */
4631 if (AHD_DV_CMD(cmd) == FALSE) { 1934 {
4632 uint32_t status; 1935 uint32_t status;
4633 u_int new_status; 1936 u_int new_status;
4634 1937
4635 status = ahd_cmd_get_transaction_status(cmd); 1938 status = ahd_cmd_get_transaction_status(cmd);
4636 if (status != CAM_REQ_CMP) {
4637 struct ahd_linux_device *dev;
4638 struct ahd_devinfo devinfo;
4639 cam_status cam_status;
4640 uint32_t action;
4641 u_int scsi_status;
4642
4643 dev = ahd_linux_get_device(ahd, cmd->device->channel,
4644 cmd->device->id,
4645 cmd->device->lun,
4646 /*alloc*/FALSE);
4647
4648 if (dev == NULL)
4649 goto no_fallback;
4650
4651 ahd_compile_devinfo(&devinfo,
4652 ahd->our_id,
4653 dev->target->target, dev->lun,
4654 dev->target->channel == 0 ? 'A':'B',
4655 ROLE_INITIATOR);
4656
4657 scsi_status = ahd_cmd_get_scsi_status(cmd);
4658 cam_status = ahd_cmd_get_transaction_status(cmd);
4659 action = aic_error_action(cmd, dev->target->inq_data,
4660 cam_status, scsi_status);
4661 if ((action & SSQ_FALLBACK) != 0) {
4662
4663 /* Update stats */
4664 dev->target->errors_detected++;
4665 if (dev->target->cmds_since_error == 0)
4666 dev->target->cmds_since_error++;
4667 else {
4668 dev->target->cmds_since_error = 0;
4669 ahd_linux_fallback(ahd, &devinfo);
4670 }
4671 }
4672 }
4673no_fallback:
4674 switch (status) { 1939 switch (status) {
4675 case CAM_REQ_INPROG: 1940 case CAM_REQ_INPROG:
4676 case CAM_REQ_CMP: 1941 case CAM_REQ_CMP:
@@ -4715,26 +1980,7 @@ no_fallback:
4715 new_status = DID_ERROR; 1980 new_status = DID_ERROR;
4716 break; 1981 break;
4717 case CAM_REQUEUE_REQ: 1982 case CAM_REQUEUE_REQ:
4718 /* 1983 new_status = DID_REQUEUE;
4719 * If we want the request requeued, make sure there
4720 * are sufficient retries. In the old scsi error code,
4721 * we used to be able to specify a result code that
4722 * bypassed the retry count. Now we must use this
4723 * hack. We also "fake" a check condition with
4724 * a sense code of ABORTED COMMAND. This seems to
4725 * evoke a retry even if this command is being sent
4726 * via the eh thread. Ick! Ick! Ick!
4727 */
4728 if (cmd->retries > 0)
4729 cmd->retries--;
4730 new_status = DID_OK;
4731 ahd_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
4732 cmd->result |= (DRIVER_SENSE << 24);
4733 memset(cmd->sense_buffer, 0,
4734 sizeof(cmd->sense_buffer));
4735 cmd->sense_buffer[0] = SSD_ERRCODE_VALID
4736 | SSD_CURRENT_ERROR;
4737 cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
4738 break; 1984 break;
4739 default: 1985 default:
4740 /* We should never get here */ 1986 /* We should never get here */
@@ -4745,116 +1991,23 @@ no_fallback:
4745 ahd_cmd_set_transaction_status(cmd, new_status); 1991 ahd_cmd_set_transaction_status(cmd, new_status);
4746 } 1992 }
4747 1993
4748 completeq = &ahd->platform_data->completeq; 1994 cmd->scsi_done(cmd);
4749 list_cmd = TAILQ_FIRST(completeq);
4750 acmd = (struct ahd_cmd *)cmd;
4751 while (list_cmd != NULL
4752 && acmd_scsi_cmd(list_cmd).serial_number
4753 < acmd_scsi_cmd(acmd).serial_number)
4754 list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
4755 if (list_cmd != NULL)
4756 TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
4757 else
4758 TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
4759} 1995}
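/*
 * A minimal sketch of the CAM-to-Linux status translation performed by
 * ahd_linux_queue_cmd_complete() above.  Only the CAM_REQUEUE_REQ and
 * default arms are visible in this hunk; the DID_OK mapping for completed
 * requests is an assumption, and cam_to_host_byte() is a hypothetical
 * helper, not a function from the driver.
 */
static u_int
cam_to_host_byte(uint32_t cam_status)
{
	switch (cam_status) {
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
		return (DID_OK);	/* assumed; this arm is elided above */
	case CAM_REQUEUE_REQ:
		return (DID_REQUEUE);	/* as in the hunk above */
	default:
		return (DID_ERROR);	/* "We should never get here" */
	}
}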
4760 1996
4761static void 1997static void
4762ahd_linux_filter_inquiry(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 1998ahd_linux_sem_timeout(u_long arg)
4763{ 1999{
4764 struct scsi_inquiry_data *sid; 2000 struct ahd_softc *ahd;
4765 struct ahd_initiator_tinfo *tinfo; 2001 u_long s;
4766 struct ahd_transinfo *user;
4767 struct ahd_transinfo *goal;
4768 struct ahd_transinfo *curr;
4769 struct ahd_tmode_tstate *tstate;
4770 struct ahd_linux_device *dev;
4771 u_int width;
4772 u_int period;
4773 u_int offset;
4774 u_int ppr_options;
4775 u_int trans_version;
4776 u_int prot_version;
4777
4778 /*
4779 * Determine if this lun actually exists. If so,
4780 * hold on to its corresponding device structure.
4781 * If not, make sure we release the device and
4782 * don't bother processing the rest of this inquiry
4783 * command.
4784 */
4785 dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
4786 devinfo->target, devinfo->lun,
4787 /*alloc*/TRUE);
4788
4789 sid = (struct scsi_inquiry_data *)dev->target->inq_data;
4790 if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
4791
4792 dev->flags &= ~AHD_DEV_UNCONFIGURED;
4793 } else {
4794 dev->flags |= AHD_DEV_UNCONFIGURED;
4795 return;
4796 }
4797 2002
4798 /* 2003 ahd = (struct ahd_softc *)arg;
4799 * Update our notion of this device's transfer
4800 * negotiation capabilities.
4801 */
4802 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
4803 devinfo->our_scsiid,
4804 devinfo->target, &tstate);
4805 user = &tinfo->user;
4806 goal = &tinfo->goal;
4807 curr = &tinfo->curr;
4808 width = user->width;
4809 period = user->period;
4810 offset = user->offset;
4811 ppr_options = user->ppr_options;
4812 trans_version = user->transport_version;
4813 prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
4814 2004
4815 /* 2005 ahd_lock(ahd, &s);
4816 * Only attempt SPI3/4 once we've verified that 2006 if ((ahd->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
4817 * the device claims to support SPI3/4 features. 2007 ahd->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
4818 */ 2008 up(&ahd->platform_data->eh_sem);
4819 if (prot_version < SCSI_REV_2)
4820 trans_version = SID_ANSI_REV(sid);
4821 else
4822 trans_version = SCSI_REV_2;
4823
4824 if ((sid->flags & SID_WBus16) == 0)
4825 width = MSG_EXT_WDTR_BUS_8_BIT;
4826 if ((sid->flags & SID_Sync) == 0) {
4827 period = 0;
4828 offset = 0;
4829 ppr_options = 0;
4830 }
4831 if ((sid->spi3data & SID_SPI_QAS) == 0)
4832 ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
4833 if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
4834 ppr_options &= MSG_EXT_PPR_QAS_REQ;
4835 if ((sid->spi3data & SID_SPI_IUS) == 0)
4836 ppr_options &= (MSG_EXT_PPR_DT_REQ
4837 | MSG_EXT_PPR_QAS_REQ);
4838
4839 if (prot_version > SCSI_REV_2
4840 && ppr_options != 0)
4841 trans_version = user->transport_version;
4842
4843 ahd_validate_width(ahd, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
4844 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
4845 ahd_validate_offset(ahd, /*tinfo limit*/NULL, period,
4846 &offset, width, ROLE_UNKNOWN);
4847 if (offset == 0 || period == 0) {
4848 period = 0;
4849 offset = 0;
4850 ppr_options = 0;
4851 } 2009 }
4852 /* Apply our filtered user settings. */ 2010 ahd_unlock(ahd, &s);
4853 curr->transport_version = trans_version;
4854 curr->protocol_version = prot_version;
4855 ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, /*paused*/FALSE);
4856 ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options,
4857 AHD_TRANS_GOAL, /*paused*/FALSE);
4858} 2011}
4859 2012
4860void 2013void
@@ -4882,12 +2035,6 @@ ahd_release_simq(struct ahd_softc *ahd)
4882 if (ahd->platform_data->qfrozen == 0) { 2035 if (ahd->platform_data->qfrozen == 0) {
4883 unblock_reqs = 1; 2036 unblock_reqs = 1;
4884 } 2037 }
4885 if (AHD_DV_SIMQ_FROZEN(ahd)
4886 && ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_RELEASE) != 0)) {
4887 ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_RELEASE;
4888 up(&ahd->platform_data->dv_sem);
4889 }
4890 ahd_schedule_runq(ahd);
4891 ahd_unlock(ahd, &s); 2038 ahd_unlock(ahd, &s);
4892 /* 2039 /*
4893 * There is still a race here. The mid-layer 2040 * There is still a race here. The mid-layer
@@ -4899,118 +2046,743 @@ ahd_release_simq(struct ahd_softc *ahd)
4899 scsi_unblock_requests(ahd->platform_data->host); 2046 scsi_unblock_requests(ahd->platform_data->host);
4900} 2047}
4901 2048
4902static void 2049static int
4903ahd_linux_sem_timeout(u_long arg) 2050ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
4904{ 2051{
4905 struct scb *scb; 2052 struct ahd_softc *ahd;
4906 struct ahd_softc *ahd; 2053 struct ahd_linux_device *dev;
4907 u_long s; 2054 struct scb *pending_scb;
2055 u_int saved_scbptr;
2056 u_int active_scbptr;
2057 u_int last_phase;
2058 u_int saved_scsiid;
2059 u_int cdb_byte;
2060 int retval;
2061 int was_paused;
2062 int paused;
2063 int wait;
2064 int disconnected;
2065 ahd_mode_state saved_modes;
4908 2066
4909 scb = (struct scb *)arg; 2067 pending_scb = NULL;
4910 ahd = scb->ahd_softc; 2068 paused = FALSE;
4911 ahd_lock(ahd, &s); 2069 wait = FALSE;
4912 if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) { 2070 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
4913 scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM; 2071
4914 up(&ahd->platform_data->eh_sem); 2072 printf("%s:%d:%d:%d: Attempting to queue a%s message:",
2073 ahd_name(ahd), cmd->device->channel,
2074 cmd->device->id, cmd->device->lun,
2075 flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
2076
2077 printf("CDB:");
2078 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
2079 printf(" 0x%x", cmd->cmnd[cdb_byte]);
2080 printf("\n");
2081
2082 spin_lock_irq(&ahd->platform_data->spin_lock);
2083
2084 /*
2085 * First determine if we currently own this command.
2086 * Start by searching the device queue. If not found
2087 * there, check the pending_scb list. If not found
2088 * at all, and the system wanted us to just abort the
2089 * command, return success.
2090 */
2091 dev = scsi_transport_device_data(cmd->device);
2092
2093 if (dev == NULL) {
2094 /*
2095 * No target device for this command exists,
2096 * so we must not still own the command.
2097 */
2098 printf("%s:%d:%d:%d: Is not an active device\n",
2099 ahd_name(ahd), cmd->device->channel, cmd->device->id,
2100 cmd->device->lun);
2101 retval = SUCCESS;
2102 goto no_cmd;
4915 } 2103 }
4916 ahd_unlock(ahd, &s); 2104
2105 /*
2106 * See if we can find a matching cmd in the pending list.
2107 */
2108 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
2109 if (pending_scb->io_ctx == cmd)
2110 break;
2111 }
2112
2113 if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
2114
2115 /* Any SCB for this device will do for a target reset */
2116 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
2117 if (ahd_match_scb(ahd, pending_scb, cmd->device->id,
2118 cmd->device->channel + 'A',
2119 CAM_LUN_WILDCARD,
2120 SCB_LIST_NULL, ROLE_INITIATOR) == 0)
2121 break;
2122 }
2123 }
2124
2125 if (pending_scb == NULL) {
2126 printf("%s:%d:%d:%d: Command not found\n",
2127 ahd_name(ahd), cmd->device->channel, cmd->device->id,
2128 cmd->device->lun);
2129 goto no_cmd;
2130 }
2131
2132 if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
2133 /*
2134 * We can't queue two recovery actions using the same SCB
2135 */
2136 retval = FAILED;
2137 goto done;
2138 }
2139
2140 /*
2141 * Ensure that the card doesn't do anything
2142 * behind our back. Also make sure that we
2143 * didn't "just" miss an interrupt that would
2144 * affect this cmd.
2145 */
2146 was_paused = ahd_is_paused(ahd);
2147 ahd_pause_and_flushwork(ahd);
2148 paused = TRUE;
2149
2150 if ((pending_scb->flags & SCB_ACTIVE) == 0) {
2151 printf("%s:%d:%d:%d: Command already completed\n",
2152 ahd_name(ahd), cmd->device->channel, cmd->device->id,
2153 cmd->device->lun);
2154 goto no_cmd;
2155 }
2156
2157 printf("%s: At time of recovery, card was %spaused\n",
2158 ahd_name(ahd), was_paused ? "" : "not ");
2159 ahd_dump_card_state(ahd);
2160
2161 disconnected = TRUE;
2162 if (flag == SCB_ABORT) {
2163 if (ahd_search_qinfifo(ahd, cmd->device->id,
2164 cmd->device->channel + 'A',
2165 cmd->device->lun,
2166 pending_scb->hscb->tag,
2167 ROLE_INITIATOR, CAM_REQ_ABORTED,
2168 SEARCH_COMPLETE) > 0) {
2169 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
2170 ahd_name(ahd), cmd->device->channel,
2171 cmd->device->id, cmd->device->lun);
2172 retval = SUCCESS;
2173 goto done;
2174 }
2175 } else if (ahd_search_qinfifo(ahd, cmd->device->id,
2176 cmd->device->channel + 'A',
2177 cmd->device->lun, pending_scb->hscb->tag,
2178 ROLE_INITIATOR, /*status*/0,
2179 SEARCH_COUNT) > 0) {
2180 disconnected = FALSE;
2181 }
2182
2183 saved_modes = ahd_save_modes(ahd);
2184 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2185 last_phase = ahd_inb(ahd, LASTPHASE);
2186 saved_scbptr = ahd_get_scbptr(ahd);
2187 active_scbptr = saved_scbptr;
2188 if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
2189 struct scb *bus_scb;
2190
2191 bus_scb = ahd_lookup_scb(ahd, active_scbptr);
2192 if (bus_scb == pending_scb)
2193 disconnected = FALSE;
2194 else if (flag != SCB_ABORT
2195 && ahd_inb(ahd, SAVED_SCSIID) == pending_scb->hscb->scsiid
2196 && ahd_inb(ahd, SAVED_LUN) == SCB_GET_LUN(pending_scb))
2197 disconnected = FALSE;
2198 }
2199
2200 /*
2201 * At this point, pending_scb is the scb associated with the
2202 * passed in command. That command is currently active on the
2203 * bus or is in the disconnected state.
2204 */
2205 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2206 if (last_phase != P_BUSFREE
2207 && (SCB_GET_TAG(pending_scb) == active_scbptr
2208 || (flag == SCB_DEVICE_RESET
2209 && SCSIID_TARGET(ahd, saved_scsiid) == cmd->device->id))) {
2210
2211 /*
2212 * We're active on the bus, so assert ATN
2213 * and hope that the target responds.
2214 */
2215 pending_scb = ahd_lookup_scb(ahd, active_scbptr);
2216 pending_scb->flags |= SCB_RECOVERY_SCB|flag;
2217 ahd_outb(ahd, MSG_OUT, HOST_MSG);
2218 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
2219 printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
2220 ahd_name(ahd), cmd->device->channel,
2221 cmd->device->id, cmd->device->lun);
2222 wait = TRUE;
2223 } else if (disconnected) {
2224
2225 /*
2226 * Actually re-queue this SCB in an attempt
2227 * to select the device before it reconnects.
2228 */
2229 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
2230 ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
2231 pending_scb->hscb->cdb_len = 0;
2232 pending_scb->hscb->task_attribute = 0;
2233 pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
2234
2235 if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
2236 /*
 2237 * Mark the SCB as having an outstanding
2238 * task management function. Should the command
2239 * complete normally before the task management
2240 * function can be sent, the host will be notified
2241 * to abort our requeued SCB.
2242 */
2243 ahd_outb(ahd, SCB_TASK_MANAGEMENT,
2244 pending_scb->hscb->task_management);
2245 } else {
2246 /*
2247 * If non-packetized, set the MK_MESSAGE control
2248 * bit indicating that we desire to send a message.
2249 * We also set the disconnected flag since there is
2250 * no guarantee that our SCB control byte matches
2251 * the version on the card. We don't want the
2252 * sequencer to abort the command thinking an
2253 * unsolicited reselection occurred.
2254 */
2255 pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
2256
2257 /*
2258 * The sequencer will never re-reference the
2259 * in-core SCB. To make sure we are notified
 2260 * during reselection, set the MK_MESSAGE flag in
2261 * the card's copy of the SCB.
2262 */
2263 ahd_outb(ahd, SCB_CONTROL,
2264 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
2265 }
2266
2267 /*
2268 * Clear out any entries in the QINFIFO first
2269 * so we are the next SCB for this target
2270 * to run.
2271 */
2272 ahd_search_qinfifo(ahd, cmd->device->id,
2273 cmd->device->channel + 'A', cmd->device->lun,
2274 SCB_LIST_NULL, ROLE_INITIATOR,
2275 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
2276 ahd_qinfifo_requeue_tail(ahd, pending_scb);
2277 ahd_set_scbptr(ahd, saved_scbptr);
2278 ahd_print_path(ahd, pending_scb);
2279 printf("Device is disconnected, re-queuing SCB\n");
2280 wait = TRUE;
2281 } else {
2282 printf("%s:%d:%d:%d: Unable to deliver message\n",
2283 ahd_name(ahd), cmd->device->channel,
2284 cmd->device->id, cmd->device->lun);
2285 retval = FAILED;
2286 goto done;
2287 }
2288
2289no_cmd:
2290 /*
2291 * Our assumption is that if we don't have the command, no
2292 * recovery action was required, so we return success. Again,
2293 * the semantics of the mid-layer recovery engine are not
2294 * well defined, so this may change in time.
2295 */
2296 retval = SUCCESS;
2297done:
2298 if (paused)
2299 ahd_unpause(ahd);
2300 if (wait) {
2301 struct timer_list timer;
2302 int ret;
2303
2304 ahd->platform_data->flags |= AHD_SCB_UP_EH_SEM;
2305 spin_unlock_irq(&ahd->platform_data->spin_lock);
2306 init_timer(&timer);
2307 timer.data = (u_long)ahd;
2308 timer.expires = jiffies + (5 * HZ);
2309 timer.function = ahd_linux_sem_timeout;
2310 add_timer(&timer);
2311 printf("Recovery code sleeping\n");
2312 down(&ahd->platform_data->eh_sem);
2313 printf("Recovery code awake\n");
2314 ret = del_timer_sync(&timer);
2315 if (ret == 0) {
2316 printf("Timer Expired\n");
2317 retval = FAILED;
2318 }
2319 spin_lock_irq(&ahd->platform_data->spin_lock);
2320 }
2321 spin_unlock_irq(&ahd->platform_data->spin_lock);
2322 return (retval);
4917} 2323}
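/*
 * The recovery path above bounds its sleep on eh_sem with a timer whose
 * callback (ahd_linux_sem_timeout) simply up()s the same semaphore.  A
 * stand-alone sketch of that bounded-wait pattern follows; the names are
 * hypothetical, the usual driver includes are assumed, and the caller is
 * assumed to have done sema_init(&bw->sem, 0) beforehand.
 */
struct bounded_wait {
	struct semaphore sem;
	struct timer_list timer;
};

static void
bounded_wait_timeout(u_long arg)
{
	struct bounded_wait *bw = (struct bounded_wait *)arg;

	/* Timeout fired before the event; wake the sleeper anyway. */
	up(&bw->sem);
}

/* Returns 0 if woken by the event, -1 if the timeout expired first. */
static int
bounded_wait_sleep(struct bounded_wait *bw, unsigned long ticks)
{
	init_timer(&bw->timer);
	bw->timer.data = (u_long)bw;
	bw->timer.expires = jiffies + ticks;
	bw->timer.function = bounded_wait_timeout;
	add_timer(&bw->timer);

	down(&bw->sem);			/* released by up() from either path */

	/* del_timer_sync() returns 0 if the timer had already run. */
	return (del_timer_sync(&bw->timer) == 0 ? -1 : 0);
}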
4918 2324
4919static void 2325static void ahd_linux_set_width(struct scsi_target *starget, int width)
4920ahd_linux_dev_timed_unfreeze(u_long arg)
4921{ 2326{
4922 struct ahd_linux_device *dev; 2327 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4923 struct ahd_softc *ahd; 2328 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
4924 u_long s; 2329 struct ahd_devinfo devinfo;
2330 unsigned long flags;
4925 2331
4926 dev = (struct ahd_linux_device *)arg; 2332 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
4927 ahd = dev->target->ahd; 2333 starget->channel + 'A', ROLE_INITIATOR);
4928 ahd_lock(ahd, &s); 2334 ahd_lock(ahd, &flags);
4929 dev->flags &= ~AHD_DEV_TIMER_ACTIVE; 2335 ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
4930 if (dev->qfrozen > 0) 2336 ahd_unlock(ahd, &flags);
4931 dev->qfrozen--;
4932 if (dev->qfrozen == 0
4933 && (dev->flags & AHD_DEV_ON_RUN_LIST) == 0)
4934 ahd_linux_run_device_queue(ahd, dev);
4935 if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
4936 && dev->active == 0)
4937 ahd_linux_free_device(ahd, dev);
4938 ahd_unlock(ahd, &s);
4939} 2337}
4940 2338
4941void 2339static void ahd_linux_set_period(struct scsi_target *starget, int period)
4942ahd_platform_dump_card_state(struct ahd_softc *ahd)
4943{ 2340{
4944 struct ahd_linux_device *dev; 2341 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4945 int target; 2342 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
4946 int maxtarget; 2343 struct ahd_tmode_tstate *tstate;
4947 int lun; 2344 struct ahd_initiator_tinfo *tinfo
4948 int i; 2345 = ahd_fetch_transinfo(ahd,
4949 2346 starget->channel + 'A',
4950 maxtarget = (ahd->features & AHD_WIDE) ? 15 : 7; 2347 shost->this_id, starget->id, &tstate);
4951 for (target = 0; target <=maxtarget; target++) { 2348 struct ahd_devinfo devinfo;
4952 2349 unsigned int ppr_options = tinfo->goal.ppr_options;
4953 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 2350 unsigned int dt;
4954 struct ahd_cmd *acmd; 2351 unsigned long flags;
4955 2352 unsigned long offset = tinfo->goal.offset;
4956 dev = ahd_linux_get_device(ahd, 0, target, 2353
4957 lun, /*alloc*/FALSE); 2354#ifdef AHD_DEBUG
4958 if (dev == NULL) 2355 if ((ahd_debug & AHD_SHOW_DV) != 0)
4959 continue; 2356 printf("%s: set period to %d\n", ahd_name(ahd), period);
4960 2357#endif
4961 printf("DevQ(%d:%d:%d): ", 0, target, lun); 2358 if (offset == 0)
4962 i = 0; 2359 offset = MAX_OFFSET;
4963 TAILQ_FOREACH(acmd, &dev->busyq, acmd_links.tqe) { 2360
4964 if (i++ > AHD_SCB_MAX) 2361 if (period < 8)
4965 break; 2362 period = 8;
4966 } 2363 if (period < 10) {
4967 printf("%d waiting\n", i); 2364 ppr_options |= MSG_EXT_PPR_DT_REQ;
4968 } 2365 if (period == 8)
2366 ppr_options |= MSG_EXT_PPR_IU_REQ;
4969 } 2367 }
2368
2369 dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2370
2371 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2372 starget->channel + 'A', ROLE_INITIATOR);
2373
2374 /* all PPR requests apart from QAS require wide transfers */
2375 if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
2376 if (spi_width(starget) == 0)
2377 ppr_options &= MSG_EXT_PPR_QAS_REQ;
2378 }
2379
2380 ahd_find_syncrate(ahd, &period, &ppr_options,
2381 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2382
2383 ahd_lock(ahd, &flags);
2384 ahd_set_syncrate(ahd, &devinfo, period, offset,
2385 ppr_options, AHD_TRANS_GOAL, FALSE);
2386 ahd_unlock(ahd, &flags);
4970} 2387}
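/*
 * The "period < 10" and "period == 8" checks above follow from the
 * parallel-SCSI transfer period factor encoding.  The table below is a
 * sketch from the SPI specification as best recalled here, not something
 * taken from this patch, and period_factor_to_ps() is a hypothetical
 * helper:
 *
 *   0x08 ->  6.25 ns (Fast-160; requires DT and IU)
 *   0x09 -> 12.5  ns (Fast-80;  requires DT)
 *   0x0a -> 25    ns (Fast-40)
 *   0x0b -> 30.3  ns
 *   0x0c -> 50    ns (Fast-20)
 *   >= 0x0d -> factor * 4 ns
 */
static u_int
period_factor_to_ps(u_int factor)
{
	switch (factor) {
	case 0x08: return (6250);	/* picoseconds */
	case 0x09: return (12500);
	case 0x0a: return (25000);
	case 0x0b: return (30300);
	case 0x0c: return (50000);
	default:   return (factor * 4000);
	}
}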
4971 2388
4972static int __init 2389static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
4973ahd_linux_init(void)
4974{ 2390{
4975#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 2391 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4976 return ahd_linux_detect(&aic79xx_driver_template); 2392 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
4977#else 2393 struct ahd_tmode_tstate *tstate;
4978 scsi_register_module(MODULE_SCSI_HA, &aic79xx_driver_template); 2394 struct ahd_initiator_tinfo *tinfo
4979 if (aic79xx_driver_template.present == 0) { 2395 = ahd_fetch_transinfo(ahd,
4980 scsi_unregister_module(MODULE_SCSI_HA, 2396 starget->channel + 'A',
4981 &aic79xx_driver_template); 2397 shost->this_id, starget->id, &tstate);
4982 return (-ENODEV); 2398 struct ahd_devinfo devinfo;
2399 unsigned int ppr_options = 0;
2400 unsigned int period = 0;
2401 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2402 unsigned long flags;
2403
2404#ifdef AHD_DEBUG
2405 if ((ahd_debug & AHD_SHOW_DV) != 0)
2406 printf("%s: set offset to %d\n", ahd_name(ahd), offset);
2407#endif
2408
2409 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2410 starget->channel + 'A', ROLE_INITIATOR);
2411 if (offset != 0) {
2412 period = tinfo->goal.period;
2413 ppr_options = tinfo->goal.ppr_options;
2414 ahd_find_syncrate(ahd, &period, &ppr_options,
2415 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
4983 } 2416 }
4984 2417
4985 return (0); 2418 ahd_lock(ahd, &flags);
2419 ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options,
2420 AHD_TRANS_GOAL, FALSE);
2421 ahd_unlock(ahd, &flags);
2422}
2423
2424static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
2425{
2426 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2427 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2428 struct ahd_tmode_tstate *tstate;
2429 struct ahd_initiator_tinfo *tinfo
2430 = ahd_fetch_transinfo(ahd,
2431 starget->channel + 'A',
2432 shost->this_id, starget->id, &tstate);
2433 struct ahd_devinfo devinfo;
2434 unsigned int ppr_options = tinfo->goal.ppr_options
2435 & ~MSG_EXT_PPR_DT_REQ;
2436 unsigned int period = tinfo->goal.period;
2437 unsigned int width = tinfo->goal.width;
2438 unsigned long flags;
2439
2440#ifdef AHD_DEBUG
2441 if ((ahd_debug & AHD_SHOW_DV) != 0)
2442 printf("%s: %s DT\n", ahd_name(ahd),
2443 dt ? "enabling" : "disabling");
2444#endif
2445 if (dt) {
2446 ppr_options |= MSG_EXT_PPR_DT_REQ;
2447 if (!width)
2448 ahd_linux_set_width(starget, 1);
2449 } else {
2450 if (period <= 9)
2451 period = 10; /* If resetting DT, period must be >= 25ns */
2452 /* IU is invalid without DT set */
2453 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
2454 }
2455 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2456 starget->channel + 'A', ROLE_INITIATOR);
2457 ahd_find_syncrate(ahd, &period, &ppr_options,
2458 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2459
2460 ahd_lock(ahd, &flags);
2461 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2462 ppr_options, AHD_TRANS_GOAL, FALSE);
2463 ahd_unlock(ahd, &flags);
2464}
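/*
 * The DT and IU handlers above encode a few interdependencies: IU is only
 * valid with DT, PPR options other than QAS need a wide bus, and dropping
 * DT forces the period factor back to 10 (25 ns) or slower.  A hypothetical
 * consistency check distilled from that logic (not a driver function):
 */
static int
ppr_goal_is_consistent(u_int ppr_options, u_int period, u_int width)
{
	if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0
	 && (ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
		return (0);		/* IU requires DT */
	if ((ppr_options & ~MSG_EXT_PPR_QAS_REQ) != 0 && width == 0)
		return (0);		/* all PPR options but QAS need wide */
	if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period < 10)
		return (0);		/* non-DT transfers are 25 ns or slower */
	return (1);
}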
2465
2466static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
2467{
2468 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2469 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2470 struct ahd_tmode_tstate *tstate;
2471 struct ahd_initiator_tinfo *tinfo
2472 = ahd_fetch_transinfo(ahd,
2473 starget->channel + 'A',
2474 shost->this_id, starget->id, &tstate);
2475 struct ahd_devinfo devinfo;
2476 unsigned int ppr_options = tinfo->goal.ppr_options
2477 & ~MSG_EXT_PPR_QAS_REQ;
2478 unsigned int period = tinfo->goal.period;
2479 unsigned int dt;
2480 unsigned long flags;
2481
2482#ifdef AHD_DEBUG
2483 if ((ahd_debug & AHD_SHOW_DV) != 0)
2484 printf("%s: %s QAS\n", ahd_name(ahd),
2485 qas ? "enabling" : "disabling");
2486#endif
2487
2488 if (qas) {
2489 ppr_options |= MSG_EXT_PPR_QAS_REQ;
2490 }
2491
2492 dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2493
2494 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2495 starget->channel + 'A', ROLE_INITIATOR);
2496 ahd_find_syncrate(ahd, &period, &ppr_options,
2497 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2498
2499 ahd_lock(ahd, &flags);
2500 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2501 ppr_options, AHD_TRANS_GOAL, FALSE);
2502 ahd_unlock(ahd, &flags);
2503}
2504
2505static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
2506{
2507 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2508 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2509 struct ahd_tmode_tstate *tstate;
2510 struct ahd_initiator_tinfo *tinfo
2511 = ahd_fetch_transinfo(ahd,
2512 starget->channel + 'A',
2513 shost->this_id, starget->id, &tstate);
2514 struct ahd_devinfo devinfo;
2515 unsigned int ppr_options = tinfo->goal.ppr_options
2516 & ~MSG_EXT_PPR_IU_REQ;
2517 unsigned int period = tinfo->goal.period;
2518 unsigned int dt;
2519 unsigned long flags;
2520
2521#ifdef AHD_DEBUG
2522 if ((ahd_debug & AHD_SHOW_DV) != 0)
2523 printf("%s: %s IU\n", ahd_name(ahd),
2524 iu ? "enabling" : "disabling");
4986#endif 2525#endif
2526
2527 if (iu) {
2528 ppr_options |= MSG_EXT_PPR_IU_REQ;
2529 ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */
2530 }
2531
2532 dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2533
2534 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2535 starget->channel + 'A', ROLE_INITIATOR);
2536 ahd_find_syncrate(ahd, &period, &ppr_options,
2537 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2538
2539 ahd_lock(ahd, &flags);
2540 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2541 ppr_options, AHD_TRANS_GOAL, FALSE);
2542 ahd_unlock(ahd, &flags);
4987} 2543}
4988 2544
4989static void __exit 2545static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
4990ahd_linux_exit(void)
4991{ 2546{
4992 struct ahd_softc *ahd; 2547 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2548 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2549 struct ahd_tmode_tstate *tstate;
2550 struct ahd_initiator_tinfo *tinfo
2551 = ahd_fetch_transinfo(ahd,
2552 starget->channel + 'A',
2553 shost->this_id, starget->id, &tstate);
2554 struct ahd_devinfo devinfo;
2555 unsigned int ppr_options = tinfo->goal.ppr_options
2556 & ~MSG_EXT_PPR_RD_STRM;
2557 unsigned int period = tinfo->goal.period;
2558 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2559 unsigned long flags;
4993 2560
4994 /* 2561#ifdef AHD_DEBUG
4995 * Shutdown DV threads before going into the SCSI mid-layer. 2562 if ((ahd_debug & AHD_SHOW_DV) != 0)
4996 * This avoids situations where the mid-layer locks the entire 2563 printf("%s: %s Read Streaming\n", ahd_name(ahd),
4997 * kernel so that waiting for our DV threads to exit leads 2564 rdstrm ? "enabling" : "disabling");
4998 * to deadlock. 2565#endif
4999 */ 2566
5000 TAILQ_FOREACH(ahd, &ahd_tailq, links) { 2567 if (rdstrm)
2568 ppr_options |= MSG_EXT_PPR_RD_STRM;
2569
2570 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2571 starget->channel + 'A', ROLE_INITIATOR);
2572 ahd_find_syncrate(ahd, &period, &ppr_options,
2573 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
5001 2574
5002 ahd_linux_kill_dv_thread(ahd); 2575 ahd_lock(ahd, &flags);
2576 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2577 ppr_options, AHD_TRANS_GOAL, FALSE);
2578 ahd_unlock(ahd, &flags);
2579}
2580
2581static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
2582{
2583 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2584 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2585 struct ahd_tmode_tstate *tstate;
2586 struct ahd_initiator_tinfo *tinfo
2587 = ahd_fetch_transinfo(ahd,
2588 starget->channel + 'A',
2589 shost->this_id, starget->id, &tstate);
2590 struct ahd_devinfo devinfo;
2591 unsigned int ppr_options = tinfo->goal.ppr_options
2592 & ~MSG_EXT_PPR_WR_FLOW;
2593 unsigned int period = tinfo->goal.period;
2594 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2595 unsigned long flags;
2596
2597#ifdef AHD_DEBUG
2598 if ((ahd_debug & AHD_SHOW_DV) != 0)
2599 printf("%s: %s Write Flow Control\n", ahd_name(ahd),
2600 wrflow ? "enabling" : "disabling");
2601#endif
2602
2603 if (wrflow)
2604 ppr_options |= MSG_EXT_PPR_WR_FLOW;
2605
2606 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2607 starget->channel + 'A', ROLE_INITIATOR);
2608 ahd_find_syncrate(ahd, &period, &ppr_options,
2609 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2610
2611 ahd_lock(ahd, &flags);
2612 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2613 ppr_options, AHD_TRANS_GOAL, FALSE);
2614 ahd_unlock(ahd, &flags);
2615}
2616
2617static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
2618{
2619 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2620 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2621 struct ahd_tmode_tstate *tstate;
2622 struct ahd_initiator_tinfo *tinfo
2623 = ahd_fetch_transinfo(ahd,
2624 starget->channel + 'A',
2625 shost->this_id, starget->id, &tstate);
2626 struct ahd_devinfo devinfo;
2627 unsigned int ppr_options = tinfo->goal.ppr_options
2628 & ~MSG_EXT_PPR_RTI;
2629 unsigned int period = tinfo->goal.period;
2630 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2631 unsigned long flags;
2632
2633 if ((ahd->features & AHD_RTI) == 0) {
2634#ifdef AHD_DEBUG
2635 if ((ahd_debug & AHD_SHOW_DV) != 0)
2636 printf("%s: RTI not available\n", ahd_name(ahd));
2637#endif
2638 return;
5003 } 2639 }
5004 2640
5005#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) 2641#ifdef AHD_DEBUG
2642 if ((ahd_debug & AHD_SHOW_DV) != 0)
2643 printf("%s: %s RTI\n", ahd_name(ahd),
2644 rti ? "enabling" : "disabling");
2645#endif
2646
2647 if (rti)
2648 ppr_options |= MSG_EXT_PPR_RTI;
2649
2650 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2651 starget->channel + 'A', ROLE_INITIATOR);
2652 ahd_find_syncrate(ahd, &period, &ppr_options,
2653 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2654
2655 ahd_lock(ahd, &flags);
2656 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2657 ppr_options, AHD_TRANS_GOAL, FALSE);
2658 ahd_unlock(ahd, &flags);
2659}
2660
2661static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
2662{
2663 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2664 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2665 struct ahd_tmode_tstate *tstate;
2666 struct ahd_initiator_tinfo *tinfo
2667 = ahd_fetch_transinfo(ahd,
2668 starget->channel + 'A',
2669 shost->this_id, starget->id, &tstate);
2670 struct ahd_devinfo devinfo;
2671 unsigned int ppr_options = tinfo->goal.ppr_options
2672 & ~MSG_EXT_PPR_PCOMP_EN;
2673 unsigned int period = tinfo->goal.period;
2674 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2675 unsigned long flags;
2676
2677#ifdef AHD_DEBUG
2678 if ((ahd_debug & AHD_SHOW_DV) != 0)
2679 printf("%s: %s Precompensation\n", ahd_name(ahd),
2680 pcomp ? "Enable" : "Disable");
2681#endif
2682
2683 if (pcomp)
2684 ppr_options |= MSG_EXT_PPR_PCOMP_EN;
2685
2686 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2687 starget->channel + 'A', ROLE_INITIATOR);
2688 ahd_find_syncrate(ahd, &period, &ppr_options,
2689 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2690
2691 ahd_lock(ahd, &flags);
2692 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2693 ppr_options, AHD_TRANS_GOAL, FALSE);
2694 ahd_unlock(ahd, &flags);
2695}
2696
2697static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold)
2698{
2699 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2700 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2701 struct ahd_tmode_tstate *tstate;
2702 struct ahd_initiator_tinfo *tinfo
2703 = ahd_fetch_transinfo(ahd,
2704 starget->channel + 'A',
2705 shost->this_id, starget->id, &tstate);
2706 struct ahd_devinfo devinfo;
2707 unsigned int ppr_options = tinfo->goal.ppr_options
2708 & ~MSG_EXT_PPR_HOLD_MCS;
2709 unsigned int period = tinfo->goal.period;
2710 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2711 unsigned long flags;
2712
2713 if (hold)
2714 ppr_options |= MSG_EXT_PPR_HOLD_MCS;
2715
2716 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2717 starget->channel + 'A', ROLE_INITIATOR);
2718 ahd_find_syncrate(ahd, &period, &ppr_options,
2719 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2720
2721 ahd_lock(ahd, &flags);
2722 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2723 ppr_options, AHD_TRANS_GOAL, FALSE);
2724 ahd_unlock(ahd, &flags);
2725}
2726
2727
2728
2729static struct spi_function_template ahd_linux_transport_functions = {
2730 .set_offset = ahd_linux_set_offset,
2731 .show_offset = 1,
2732 .set_period = ahd_linux_set_period,
2733 .show_period = 1,
2734 .set_width = ahd_linux_set_width,
2735 .show_width = 1,
2736 .set_dt = ahd_linux_set_dt,
2737 .show_dt = 1,
2738 .set_iu = ahd_linux_set_iu,
2739 .show_iu = 1,
2740 .set_qas = ahd_linux_set_qas,
2741 .show_qas = 1,
2742 .set_rd_strm = ahd_linux_set_rd_strm,
2743 .show_rd_strm = 1,
2744 .set_wr_flow = ahd_linux_set_wr_flow,
2745 .show_wr_flow = 1,
2746 .set_rti = ahd_linux_set_rti,
2747 .show_rti = 1,
2748 .set_pcomp_en = ahd_linux_set_pcomp_en,
2749 .show_pcomp_en = 1,
2750 .set_hold_mcs = ahd_linux_set_hold_mcs,
2751 .show_hold_mcs = 1,
2752};
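/*
 * Every single-bit setter registered above (QAS, RD_STRM, WR_FLOW, RTI,
 * PCOMP_EN, HOLD_MCS) shares the same body and differs only in which
 * MSG_EXT_PPR_* bit it toggles; the DT and IU handlers add their own
 * constraints on top.  A hypothetical common helper showing that shared
 * shape (an illustrative refactoring, not part of the patch):
 */
static void
ahd_linux_set_ppr_bit(struct scsi_target *starget, u_int ppr_bit, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo
		= ahd_fetch_transinfo(ahd, starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahd_devinfo devinfo;
	unsigned int ppr_options = tinfo->goal.ppr_options & ~ppr_bit;
	unsigned int period = tinfo->goal.period;
	unsigned int dt;
	unsigned long flags;

	if (enable)
		ppr_options |= ppr_bit;
	dt = ppr_options & MSG_EXT_PPR_DT_REQ;

	ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	ahd_find_syncrate(ahd, &period, &ppr_options,
			  dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);

	ahd_lock(ahd, &flags);
	ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
			 ppr_options, AHD_TRANS_GOAL, FALSE);
	ahd_unlock(ahd, &flags);
}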
2753
2754static int __init
2755ahd_linux_init(void)
2756{
2757 int error = 0;
2758
5006 /* 2759 /*
5007 * In 2.4 we have to unregister from the PCI core _after_ 2760 * If we've been passed any parameters, process them now.
5008 * unregistering from the scsi midlayer to avoid dangling
5009 * references.
5010 */ 2761 */
5011 scsi_unregister_module(MODULE_SCSI_HA, &aic79xx_driver_template); 2762 if (aic79xx)
5012#endif 2763 aic79xx_setup(aic79xx);
2764
2765 ahd_linux_transport_template =
2766 spi_attach_transport(&ahd_linux_transport_functions);
2767 if (!ahd_linux_transport_template)
2768 return -ENODEV;
2769
2770 scsi_transport_reserve_target(ahd_linux_transport_template,
2771 sizeof(struct ahd_linux_target));
2772 scsi_transport_reserve_device(ahd_linux_transport_template,
2773 sizeof(struct ahd_linux_device));
2774
2775 error = ahd_linux_pci_init();
2776 if (error)
2777 spi_release_transport(ahd_linux_transport_template);
2778 return error;
2779}
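/*
 * The scsi_transport_reserve_target()/scsi_transport_reserve_device()
 * calls above ask the SPI transport class to allocate room for
 * struct ahd_linux_target and struct ahd_linux_device alongside each
 * scsi_target/scsi_device it creates.  Later code recovers that space with
 * the matching accessors, as in this sketch; the wrappers are hypothetical,
 * mirroring the scsi_transport_device_data() use earlier in the file, and
 * the target-side accessor is assumed to be the analogous
 * scsi_transport_target_data().
 */
static struct ahd_linux_target *
ahd_linux_targ(struct scsi_target *starget)
{
	return (scsi_transport_target_data(starget));
}

static struct ahd_linux_device *
ahd_linux_dev(struct scsi_device *sdev)
{
	return (scsi_transport_device_data(sdev));
}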
2780
2781static void __exit
2782ahd_linux_exit(void)
2783{
5013 ahd_linux_pci_exit(); 2784 ahd_linux_pci_exit();
2785 spi_release_transport(ahd_linux_transport_template);
5014} 2786}
5015 2787
5016module_init(ahd_linux_init); 2788module_init(ahd_linux_init);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 7823e52e99ab..052c6619accc 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -42,6 +42,7 @@
42#ifndef _AIC79XX_LINUX_H_ 42#ifndef _AIC79XX_LINUX_H_
43#define _AIC79XX_LINUX_H_ 43#define _AIC79XX_LINUX_H_
44 44
45#include <linux/config.h>
45#include <linux/types.h> 46#include <linux/types.h>
46#include <linux/blkdev.h> 47#include <linux/blkdev.h>
47#include <linux/delay.h> 48#include <linux/delay.h>
@@ -49,18 +50,23 @@
49#include <linux/pci.h> 50#include <linux/pci.h>
50#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
51#include <linux/version.h> 52#include <linux/version.h>
53#include <linux/interrupt.h>
52#include <linux/module.h> 54#include <linux/module.h>
55#include <linux/slab.h>
53#include <asm/byteorder.h> 56#include <asm/byteorder.h>
54#include <asm/io.h> 57#include <asm/io.h>
55 58
56#include <linux/interrupt.h> /* For tasklet support. */ 59#include <scsi/scsi.h>
57#include <linux/config.h> 60#include <scsi/scsi_cmnd.h>
58#include <linux/slab.h> 61#include <scsi/scsi_eh.h>
62#include <scsi/scsi_device.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h>
65#include <scsi/scsi_transport.h>
66#include <scsi/scsi_transport_spi.h>
59 67
60/* Core SCSI definitions */ 68/* Core SCSI definitions */
61#define AIC_LIB_PREFIX ahd 69#define AIC_LIB_PREFIX ahd
62#include "scsi.h"
63#include <scsi/scsi_host.h>
64 70
65/* Name space conflict with BSD queue macros */ 71/* Name space conflict with BSD queue macros */
66#ifdef LIST_HEAD 72#ifdef LIST_HEAD
@@ -95,7 +101,7 @@
95/************************* Forward Declarations *******************************/ 101/************************* Forward Declarations *******************************/
96struct ahd_softc; 102struct ahd_softc;
97typedef struct pci_dev *ahd_dev_softc_t; 103typedef struct pci_dev *ahd_dev_softc_t;
98typedef Scsi_Cmnd *ahd_io_ctx_t; 104typedef struct scsi_cmnd *ahd_io_ctx_t;
99 105
100/******************************* Byte Order ***********************************/ 106/******************************* Byte Order ***********************************/
101#define ahd_htobe16(x) cpu_to_be16(x) 107#define ahd_htobe16(x) cpu_to_be16(x)
@@ -114,8 +120,7 @@ typedef Scsi_Cmnd *ahd_io_ctx_t;
114 120
115/************************* Configuration Data *********************************/ 121/************************* Configuration Data *********************************/
116extern uint32_t aic79xx_allow_memio; 122extern uint32_t aic79xx_allow_memio;
117extern int aic79xx_detect_complete; 123extern struct scsi_host_template aic79xx_driver_template;
118extern Scsi_Host_Template aic79xx_driver_template;
119 124
120/***************************** Bus Space/DMA **********************************/ 125/***************************** Bus Space/DMA **********************************/
121 126
@@ -145,11 +150,7 @@ struct ahd_linux_dma_tag
145}; 150};
146typedef struct ahd_linux_dma_tag* bus_dma_tag_t; 151typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
147 152
148struct ahd_linux_dmamap 153typedef dma_addr_t bus_dmamap_t;
149{
150 dma_addr_t bus_addr;
151};
152typedef struct ahd_linux_dmamap* bus_dmamap_t;
153 154
154typedef int bus_dma_filter_t(void*, dma_addr_t); 155typedef int bus_dma_filter_t(void*, dma_addr_t);
155typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); 156typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
@@ -226,12 +227,12 @@ typedef struct timer_list ahd_timer_t;
226#define ahd_timer_init init_timer 227#define ahd_timer_init init_timer
227#define ahd_timer_stop del_timer_sync 228#define ahd_timer_stop del_timer_sync
228typedef void ahd_linux_callback_t (u_long); 229typedef void ahd_linux_callback_t (u_long);
229static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec, 230static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
230 ahd_callback_t *func, void *arg); 231 ahd_callback_t *func, void *arg);
231static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec); 232static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
232 233
233static __inline void 234static __inline void
234ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg) 235ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
235{ 236{
236 struct ahd_softc *ahd; 237 struct ahd_softc *ahd;
237 238
@@ -252,43 +253,8 @@ ahd_scb_timer_reset(struct scb *scb, u_int usec)
252/***************************** SMP support ************************************/ 253/***************************** SMP support ************************************/
253#include <linux/spinlock.h> 254#include <linux/spinlock.h>
254 255
255#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
256#define AHD_SCSI_HAS_HOST_LOCK 1
257#else
258#define AHD_SCSI_HAS_HOST_LOCK 0
259#endif
260
261#define AIC79XX_DRIVER_VERSION "1.3.11" 256#define AIC79XX_DRIVER_VERSION "1.3.11"
262 257
263/**************************** Front End Queues ********************************/
264/*
265 * Data structure used to cast the Linux struct scsi_cmnd to something
266 * that allows us to use the queue macros. The linux structure has
267 * plenty of space to hold the links fields as required by the queue
268 * macros, but the queue macros require them to have the correct type.
269 */
270struct ahd_cmd_internal {
271 /* Area owned by the Linux scsi layer. */
272 uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
273 union {
274 STAILQ_ENTRY(ahd_cmd) ste;
275 LIST_ENTRY(ahd_cmd) le;
276 TAILQ_ENTRY(ahd_cmd) tqe;
277 } links;
278 uint32_t end;
279};
280
281struct ahd_cmd {
282 union {
283 struct ahd_cmd_internal icmd;
284 struct scsi_cmnd scsi_cmd;
285 } un;
286};
287
288#define acmd_icmd(cmd) ((cmd)->un.icmd)
289#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
290#define acmd_links un.icmd.links
291
292/*************************** Device Data Structures ***************************/ 258/*************************** Device Data Structures ***************************/
293/* 259/*
294 * A per probed device structure used to deal with some error recovery 260 * A per probed device structure used to deal with some error recovery
@@ -297,22 +263,17 @@ struct ahd_cmd {
297 * after a successfully completed inquiry command to the target when 263 * after a successfully completed inquiry command to the target when
298 * that inquiry data indicates a lun is present. 264 * that inquiry data indicates a lun is present.
299 */ 265 */
300TAILQ_HEAD(ahd_busyq, ahd_cmd); 266
301typedef enum { 267typedef enum {
302 AHD_DEV_UNCONFIGURED = 0x01,
303 AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ 268 AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
304 AHD_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
305 AHD_DEV_ON_RUN_LIST = 0x08, /* Queued to be run later */
306 AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ 269 AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
307 AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ 270 AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
308 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ 271 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
309 AHD_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
310} ahd_linux_dev_flags; 272} ahd_linux_dev_flags;
311 273
312struct ahd_linux_target; 274struct ahd_linux_target;
313struct ahd_linux_device { 275struct ahd_linux_device {
314 TAILQ_ENTRY(ahd_linux_device) links; 276 TAILQ_ENTRY(ahd_linux_device) links;
315 struct ahd_busyq busyq;
316 277
317 /* 278 /*
318 * The number of transactions currently 279 * The number of transactions currently
@@ -388,62 +349,12 @@ struct ahd_linux_device {
388 */ 349 */
389 u_int commands_since_idle_or_otag; 350 u_int commands_since_idle_or_otag;
390#define AHD_OTAG_THRESH 500 351#define AHD_OTAG_THRESH 500
391
392 int lun;
393 Scsi_Device *scsi_device;
394 struct ahd_linux_target *target;
395}; 352};
396 353
397typedef enum {
398 AHD_DV_REQUIRED = 0x01,
399 AHD_INQ_VALID = 0x02,
400 AHD_BASIC_DV = 0x04,
401 AHD_ENHANCED_DV = 0x08
402} ahd_linux_targ_flags;
403
404/* DV States */
405typedef enum {
406 AHD_DV_STATE_EXIT = 0,
407 AHD_DV_STATE_INQ_SHORT_ASYNC,
408 AHD_DV_STATE_INQ_ASYNC,
409 AHD_DV_STATE_INQ_ASYNC_VERIFY,
410 AHD_DV_STATE_TUR,
411 AHD_DV_STATE_REBD,
412 AHD_DV_STATE_INQ_VERIFY,
413 AHD_DV_STATE_WEB,
414 AHD_DV_STATE_REB,
415 AHD_DV_STATE_SU,
416 AHD_DV_STATE_BUSY
417} ahd_dv_state;
418
419struct ahd_linux_target { 354struct ahd_linux_target {
420 struct ahd_linux_device *devices[AHD_NUM_LUNS]; 355 struct scsi_device *sdev[AHD_NUM_LUNS];
421 int channel;
422 int target;
423 int refcount;
424 struct ahd_transinfo last_tinfo; 356 struct ahd_transinfo last_tinfo;
425 struct ahd_softc *ahd; 357 struct ahd_softc *ahd;
426 ahd_linux_targ_flags flags;
427 struct scsi_inquiry_data *inq_data;
428 /*
429 * The next "fallback" period to use for narrow/wide transfers.
430 */
431 uint8_t dv_next_narrow_period;
432 uint8_t dv_next_wide_period;
433 uint8_t dv_max_width;
434 uint8_t dv_max_ppr_options;
435 uint8_t dv_last_ppr_options;
436 u_int dv_echo_size;
437 ahd_dv_state dv_state;
438 u_int dv_state_retry;
439 uint8_t *dv_buffer;
440 uint8_t *dv_buffer1;
441
442 /*
443 * Cumulative counter of errors.
444 */
445 u_long errors_detected;
446 u_long cmds_since_error;
447}; 358};
448 359
449/********************* Definitions Required by the Core ***********************/ 360/********************* Definitions Required by the Core ***********************/
@@ -453,32 +364,16 @@ struct ahd_linux_target {
453 * manner and are allocated below 4GB, the number of S/G segments is 364 * manner and are allocated below 4GB, the number of S/G segments is
454 * unrestricted. 365 * unrestricted.
455 */ 366 */
456#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
457/*
458 * We dynamically adjust the number of segments in pre-2.5 kernels to
459 * avoid fragmentation issues in the SCSI mid-layer's private memory
460 * allocator. See aic79xx_osm.c ahd_linux_size_nseg() for details.
461 */
462extern u_int ahd_linux_nseg;
463#define AHD_NSEG ahd_linux_nseg
464#define AHD_LINUX_MIN_NSEG 64
465#else
466#define AHD_NSEG 128 367#define AHD_NSEG 128
467#endif
468 368
469/* 369/*
470 * Per-SCB OSM storage. 370 * Per-SCB OSM storage.
471 */ 371 */
472typedef enum {
473 AHD_SCB_UP_EH_SEM = 0x1
474} ahd_linux_scb_flags;
475
476struct scb_platform_data { 372struct scb_platform_data {
477 struct ahd_linux_device *dev; 373 struct ahd_linux_device *dev;
478 dma_addr_t buf_busaddr; 374 dma_addr_t buf_busaddr;
479 uint32_t xfer_len; 375 uint32_t xfer_len;
480 uint32_t sense_resid; /* Auto-Sense residual */ 376 uint32_t sense_resid; /* Auto-Sense residual */
481 ahd_linux_scb_flags flags;
482}; 377};
483 378
484/* 379/*
@@ -487,44 +382,23 @@ struct scb_platform_data {
487 * alignment restrictions of the various platforms supported by 382 * alignment restrictions of the various platforms supported by
488 * this driver. 383 * this driver.
489 */ 384 */
490typedef enum {
491 AHD_DV_WAIT_SIMQ_EMPTY = 0x01,
492 AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
493 AHD_DV_ACTIVE = 0x04,
494 AHD_DV_SHUTDOWN = 0x08,
495 AHD_RUN_CMPLT_Q_TIMER = 0x10
496} ahd_linux_softc_flags;
497
498TAILQ_HEAD(ahd_completeq, ahd_cmd);
499
500struct ahd_platform_data { 385struct ahd_platform_data {
501 /* 386 /*
502 * Fields accessed from interrupt context. 387 * Fields accessed from interrupt context.
503 */ 388 */
504 struct ahd_linux_target *targets[AHD_NUM_TARGETS]; 389 struct scsi_target *starget[AHD_NUM_TARGETS];
505 TAILQ_HEAD(, ahd_linux_device) device_runq;
506 struct ahd_completeq completeq;
507 390
508 spinlock_t spin_lock; 391 spinlock_t spin_lock;
509 struct tasklet_struct runq_tasklet;
510 u_int qfrozen; 392 u_int qfrozen;
511 pid_t dv_pid;
512 struct timer_list completeq_timer;
513 struct timer_list reset_timer; 393 struct timer_list reset_timer;
514 struct timer_list stats_timer;
515 struct semaphore eh_sem; 394 struct semaphore eh_sem;
516 struct semaphore dv_sem;
517 struct semaphore dv_cmd_sem; /* XXX This needs to be in
518 * the target struct
519 */
520 struct scsi_device *dv_scsi_dev;
521 struct Scsi_Host *host; /* pointer to scsi host */ 395 struct Scsi_Host *host; /* pointer to scsi host */
522#define AHD_LINUX_NOIRQ ((uint32_t)~0) 396#define AHD_LINUX_NOIRQ ((uint32_t)~0)
523 uint32_t irq; /* IRQ for this adapter */ 397 uint32_t irq; /* IRQ for this adapter */
524 uint32_t bios_address; 398 uint32_t bios_address;
525 uint32_t mem_busaddr; /* Mem Base Addr */ 399 uint32_t mem_busaddr; /* Mem Base Addr */
526 uint64_t hw_dma_mask; 400#define AHD_SCB_UP_EH_SEM 0x1
527 ahd_linux_softc_flags flags; 401 uint32_t flags;
528}; 402};
529 403
530/************************** OS Utility Wrappers *******************************/ 404/************************** OS Utility Wrappers *******************************/
@@ -641,7 +515,7 @@ ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
641 515
642/**************************** Initialization **********************************/ 516/**************************** Initialization **********************************/
643int ahd_linux_register_host(struct ahd_softc *, 517int ahd_linux_register_host(struct ahd_softc *,
644 Scsi_Host_Template *); 518 struct scsi_host_template *);
645 519
646uint64_t ahd_linux_get_memsize(void); 520uint64_t ahd_linux_get_memsize(void);
647 521
@@ -657,28 +531,6 @@ void ahd_format_transinfo(struct info_str *info,
657 struct ahd_transinfo *tinfo); 531 struct ahd_transinfo *tinfo);
658 532
659/******************************** Locking *************************************/ 533/******************************** Locking *************************************/
660/* Lock protecting internal data structures */
661static __inline void ahd_lockinit(struct ahd_softc *);
662static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
663static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);
664
665/* Lock acquisition and release of the above lock in midlayer entry points. */
666static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
667 unsigned long *flags);
668static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
669 unsigned long *flags);
670
671/* Lock held during command completion to the upper layer */
672static __inline void ahd_done_lockinit(struct ahd_softc *);
673static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
674static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);
675
676/* Lock held during ahd_list manipulation and ahd softc frees */
677extern spinlock_t ahd_list_spinlock;
678static __inline void ahd_list_lockinit(void);
679static __inline void ahd_list_lock(unsigned long *flags);
680static __inline void ahd_list_unlock(unsigned long *flags);
681
682static __inline void 534static __inline void
683ahd_lockinit(struct ahd_softc *ahd) 535ahd_lockinit(struct ahd_softc *ahd)
684{ 536{
@@ -697,75 +549,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
697 spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags); 549 spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
698} 550}
699 551
700static __inline void
701ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
702{
703 /*
704 * In 2.5.X and some 2.4.X versions, the midlayer takes our
705 * lock just before calling us, so we avoid locking again.
706 * For other kernel versions, the io_request_lock is taken
707 * just before our entry point is called. In this case, we
708 * trade the io_request_lock for our per-softc lock.
709 */
710#if AHD_SCSI_HAS_HOST_LOCK == 0
711 spin_unlock(&io_request_lock);
712 spin_lock(&ahd->platform_data->spin_lock);
713#endif
714}
715
716static __inline void
717ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
718{
719#if AHD_SCSI_HAS_HOST_LOCK == 0
720 spin_unlock(&ahd->platform_data->spin_lock);
721 spin_lock(&io_request_lock);
722#endif
723}
724
725static __inline void
726ahd_done_lockinit(struct ahd_softc *ahd)
727{
728 /*
729 * In 2.5.X, our own lock is held during completions.
730 * In previous versions, the io_request_lock is used.
731 * In either case, we can't initialize this lock again.
732 */
733}
734
735static __inline void
736ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
737{
738#if AHD_SCSI_HAS_HOST_LOCK == 0
739 spin_lock(&io_request_lock);
740#endif
741}
742
743static __inline void
744ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
745{
746#if AHD_SCSI_HAS_HOST_LOCK == 0
747 spin_unlock(&io_request_lock);
748#endif
749}
750
751static __inline void
752ahd_list_lockinit(void)
753{
754 spin_lock_init(&ahd_list_spinlock);
755}
756
757static __inline void
758ahd_list_lock(unsigned long *flags)
759{
760 spin_lock_irqsave(&ahd_list_spinlock, *flags);
761}
762
763static __inline void
764ahd_list_unlock(unsigned long *flags)
765{
766 spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
767}
768
769/******************************* PCI Definitions ******************************/ 552/******************************* PCI Definitions ******************************/
770/* 553/*
771 * PCIM_xxx: mask to locate subfield in register 554 * PCIM_xxx: mask to locate subfield in register
@@ -925,27 +708,17 @@ ahd_flush_device_writes(struct ahd_softc *ahd)
925} 708}
926 709
927/**************************** Proc FS Support *********************************/ 710/**************************** Proc FS Support *********************************/
928#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
929int ahd_linux_proc_info(char *, char **, off_t, int, int, int);
930#else
931int ahd_linux_proc_info(struct Scsi_Host *, char *, char **, 711int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
932 off_t, int, int); 712 off_t, int, int);
933#endif
934
935/*************************** Domain Validation ********************************/
936#define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
937#define AHD_DV_SIMQ_FROZEN(ahd) \
938 ((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0) \
939 && (ahd)->platform_data->qfrozen == 1)
940 713
941/*********************** Transaction Access Wrappers **************************/ 714/*********************** Transaction Access Wrappers **************************/
942static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t); 715static __inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
943static __inline void ahd_set_transaction_status(struct scb *, uint32_t); 716static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
944static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t); 717static __inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
945static __inline void ahd_set_scsi_status(struct scb *, uint32_t); 718static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
946static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd); 719static __inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
947static __inline uint32_t ahd_get_transaction_status(struct scb *); 720static __inline uint32_t ahd_get_transaction_status(struct scb *);
948static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd); 721static __inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
949static __inline uint32_t ahd_get_scsi_status(struct scb *); 722static __inline uint32_t ahd_get_scsi_status(struct scb *);
950static __inline void ahd_set_transaction_tag(struct scb *, int, u_int); 723static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
951static __inline u_long ahd_get_transfer_length(struct scb *); 724static __inline u_long ahd_get_transfer_length(struct scb *);
@@ -964,7 +737,7 @@ static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
964static __inline void ahd_freeze_scb(struct scb *scb); 737static __inline void ahd_freeze_scb(struct scb *scb);
965 738
966static __inline 739static __inline
967void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status) 740void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
968{ 741{
969 cmd->result &= ~(CAM_STATUS_MASK << 16); 742 cmd->result &= ~(CAM_STATUS_MASK << 16);
970 cmd->result |= status << 16; 743 cmd->result |= status << 16;
@@ -977,7 +750,7 @@ void ahd_set_transaction_status(struct scb *scb, uint32_t status)
977} 750}
978 751
979static __inline 752static __inline
980void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status) 753void ahd_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
981{ 754{
982 cmd->result &= ~0xFFFF; 755 cmd->result &= ~0xFFFF;
983 cmd->result |= status; 756 cmd->result |= status;
@@ -990,7 +763,7 @@ void ahd_set_scsi_status(struct scb *scb, uint32_t status)
990} 763}
991 764
992static __inline 765static __inline
993uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd) 766uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd)
994{ 767{
995 return ((cmd->result >> 16) & CAM_STATUS_MASK); 768 return ((cmd->result >> 16) & CAM_STATUS_MASK);
996} 769}
@@ -1002,7 +775,7 @@ uint32_t ahd_get_transaction_status(struct scb *scb)
1002} 775}
1003 776
1004static __inline 777static __inline
1005uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd) 778uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd)
1006{ 779{
1007 return (cmd->result & 0xFFFF); 780 return (cmd->result & 0xFFFF);
1008} 781}
@@ -1117,7 +890,6 @@ void ahd_done(struct ahd_softc*, struct scb*);
1117void ahd_send_async(struct ahd_softc *, char channel, 890void ahd_send_async(struct ahd_softc *, char channel,
1118 u_int target, u_int lun, ac_code, void *); 891 u_int target, u_int lun, ac_code, void *);
1119void ahd_print_path(struct ahd_softc *, struct scb *); 892void ahd_print_path(struct ahd_softc *, struct scb *);
1120void ahd_platform_dump_card_state(struct ahd_softc *ahd);
1121 893
1122#ifdef CONFIG_PCI 894#ifdef CONFIG_PCI
1123#define AHD_PCI_CONFIG 1 895#define AHD_PCI_CONFIG 1
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 91daf0c7fb10..390b53852d4b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -92,27 +92,31 @@ struct pci_driver aic79xx_pci_driver = {
92static void 92static void
93ahd_linux_pci_dev_remove(struct pci_dev *pdev) 93ahd_linux_pci_dev_remove(struct pci_dev *pdev)
94{ 94{
95 struct ahd_softc *ahd; 95 struct ahd_softc *ahd = pci_get_drvdata(pdev);
96 u_long l; 96 u_long s;
97 97
98 /* 98 ahd_lock(ahd, &s);
99 * We should be able to just perform 99 ahd_intr_enable(ahd, FALSE);
100 * the free directly, but check our 100 ahd_unlock(ahd, &s);
101 * list for extra sanity. 101 ahd_free(ahd);
102 */ 102}
103 ahd_list_lock(&l); 103
104 ahd = ahd_find_softc((struct ahd_softc *)pci_get_drvdata(pdev)); 104static void
105 if (ahd != NULL) { 105ahd_linux_pci_inherit_flags(struct ahd_softc *ahd)
106 u_long s; 106{
107 107 struct pci_dev *pdev = ahd->dev_softc, *master_pdev;
108 TAILQ_REMOVE(&ahd_tailq, ahd, links); 108 unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
109 ahd_list_unlock(&l); 109
110 ahd_lock(ahd, &s); 110 master_pdev = pci_get_slot(pdev->bus, master_devfn);
111 ahd_intr_enable(ahd, FALSE); 111 if (master_pdev) {
112 ahd_unlock(ahd, &s); 112 struct ahd_softc *master = pci_get_drvdata(master_pdev);
113 ahd_free(ahd); 113 if (master) {
114 } else 114 ahd->flags &= ~AHD_BIOS_ENABLED;
115 ahd_list_unlock(&l); 115 ahd->flags |= master->flags & AHD_BIOS_ENABLED;
116 } else
117 printk(KERN_ERR "aic79xx: no multichannel peer found!\n");
118 pci_dev_put(master_pdev);
119 }
116} 120}
117 121
118static int 122static int
@@ -125,22 +129,6 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
125 char *name; 129 char *name;
126 int error; 130 int error;
127 131
128 /*
129 * Some BIOSen report the same device multiple times.
130 */
131 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
132 struct pci_dev *probed_pdev;
133
134 probed_pdev = ahd->dev_softc;
135 if (probed_pdev->bus->number == pdev->bus->number
136 && probed_pdev->devfn == pdev->devfn)
137 break;
138 }
139 if (ahd != NULL) {
140 /* Skip duplicate. */
141 return (-ENODEV);
142 }
143
144 pci = pdev; 132 pci = pdev;
145 entry = ahd_find_pci_device(pci); 133 entry = ahd_find_pci_device(pci);
146 if (entry == NULL) 134 if (entry == NULL)
@@ -177,15 +165,12 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
177 if (memsize >= 0x8000000000ULL 165 if (memsize >= 0x8000000000ULL
178 && pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) { 166 && pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
179 ahd->flags |= AHD_64BIT_ADDRESSING; 167 ahd->flags |= AHD_64BIT_ADDRESSING;
180 ahd->platform_data->hw_dma_mask = DMA_64BIT_MASK;
181 } else if (memsize > 0x80000000 168 } else if (memsize > 0x80000000
182 && pci_set_dma_mask(pdev, mask_39bit) == 0) { 169 && pci_set_dma_mask(pdev, mask_39bit) == 0) {
183 ahd->flags |= AHD_39BIT_ADDRESSING; 170 ahd->flags |= AHD_39BIT_ADDRESSING;
184 ahd->platform_data->hw_dma_mask = mask_39bit;
185 } 171 }
186 } else { 172 } else {
187 pci_set_dma_mask(pdev, DMA_32BIT_MASK); 173 pci_set_dma_mask(pdev, DMA_32BIT_MASK);
188 ahd->platform_data->hw_dma_mask = DMA_32BIT_MASK;
189 } 174 }
190 ahd->dev_softc = pci; 175 ahd->dev_softc = pci;
191 error = ahd_pci_config(ahd, entry); 176 error = ahd_pci_config(ahd, entry);
@@ -193,16 +178,17 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
193 ahd_free(ahd); 178 ahd_free(ahd);
194 return (-error); 179 return (-error);
195 } 180 }
181
182 /*
183 * Second Function PCI devices need to inherit some
 184 * settings from function 0.
185 */
186 if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
187 ahd_linux_pci_inherit_flags(ahd);
188
196 pci_set_drvdata(pdev, ahd); 189 pci_set_drvdata(pdev, ahd);
197 if (aic79xx_detect_complete) { 190
198#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 191 ahd_linux_register_host(ahd, &aic79xx_driver_template);
199 ahd_linux_register_host(ahd, &aic79xx_driver_template);
200#else
201 printf("aic79xx: ignoring PCI device found after "
202 "initialization\n");
203 return (-ENODEV);
204#endif
205 }
206 return (0); 192 return (0);
207} 193}
208 194
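
The ahd_linux_pci_inherit_flags() routine added above locates function 0 of the same PCI slot with PCI_DEVFN(PCI_SLOT(pdev->devfn), 0) and, if a peer softc is registered there, copies its AHD_BIOS_ENABLED setting. A minimal user-space sketch of that devfn arithmetic, assuming macro definitions that mirror <linux/pci.h> (the sample devfn value is made up for illustration):

#include <stdio.h>

/* These mirror the kernel's <linux/pci.h> encoding: slot in bits 7:3, function in bits 2:0. */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = PCI_DEVFN(4, 1);	/* hypothetical: function 1 in slot 4 */
	unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(devfn), 0);

	printf("devfn=%u (slot %u, func %u) -> function 0 devfn=%u\n",
	       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn), master_devfn);
	return 0;
}
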
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 703f6e44889d..2131db60018a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -283,7 +283,6 @@ int
283ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) 283ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
284{ 284{
285 struct scb_data *shared_scb_data; 285 struct scb_data *shared_scb_data;
286 u_long l;
287 u_int command; 286 u_int command;
288 uint32_t devconfig; 287 uint32_t devconfig;
289 uint16_t subvendor; 288 uint16_t subvendor;
@@ -373,16 +372,9 @@ ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
373 * Allow interrupts now that we are completely setup. 372 * Allow interrupts now that we are completely setup.
374 */ 373 */
375 error = ahd_pci_map_int(ahd); 374 error = ahd_pci_map_int(ahd);
376 if (error != 0) 375 if (!error)
377 return (error); 376 ahd->init_level++;
378 377 return error;
379 ahd_list_lock(&l);
380 /*
381 * Link this softc in with all other ahd instances.
382 */
383 ahd_softc_insert(ahd);
384 ahd_list_unlock(&l);
385 return (0);
386} 378}
387 379
388/* 380/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index e01cd6175e34..39a27840fce6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -49,10 +49,53 @@ static void ahd_dump_target_state(struct ahd_softc *ahd,
49 u_int our_id, char channel, 49 u_int our_id, char channel,
50 u_int target_id, u_int target_offset); 50 u_int target_id, u_int target_offset);
51static void ahd_dump_device_state(struct info_str *info, 51static void ahd_dump_device_state(struct info_str *info,
52 struct ahd_linux_device *dev); 52 struct scsi_device *sdev);
53static int ahd_proc_write_seeprom(struct ahd_softc *ahd, 53static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
54 char *buffer, int length); 54 char *buffer, int length);
55 55
56/*
57 * Table of syncrates that don't follow the "divisible by 4"
58 * rule. This table will be expanded in future SCSI specs.
59 */
60static struct {
61 u_int period_factor;
62 u_int period; /* in 100ths of ns */
63} scsi_syncrates[] = {
64 { 0x08, 625 }, /* FAST-160 */
65 { 0x09, 1250 }, /* FAST-80 */
66 { 0x0a, 2500 }, /* FAST-40 40MHz */
67 { 0x0b, 3030 }, /* FAST-40 33MHz */
68 { 0x0c, 5000 } /* FAST-20 */
69};
70
71/*
72 * Return the frequency in kHz corresponding to the given
73 * sync period factor.
74 */
75static u_int
76ahd_calc_syncsrate(u_int period_factor)
77{
78 int i;
79 int num_syncrates;
80
81 num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
82 /* See if the period is in the "exception" table */
83 for (i = 0; i < num_syncrates; i++) {
84
85 if (period_factor == scsi_syncrates[i].period_factor) {
86 /* Period in kHz */
87 return (100000000 / scsi_syncrates[i].period);
88 }
89 }
90
91 /*
92 * Wasn't in the table, so use the standard
93 * 4 times conversion.
94 */
95 return (10000000 / (period_factor * 4 * 10));
96}
97
98
56static void 99static void
57copy_mem_info(struct info_str *info, char *data, int len) 100copy_mem_info(struct info_str *info, char *data, int len)
58{ 101{
@@ -109,7 +152,7 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
109 speed = 3300; 152 speed = 3300;
110 freq = 0; 153 freq = 0;
111 if (tinfo->offset != 0) { 154 if (tinfo->offset != 0) {
112 freq = aic_calc_syncsrate(tinfo->period); 155 freq = ahd_calc_syncsrate(tinfo->period);
113 speed = freq; 156 speed = freq;
114 } 157 }
115 speed *= (0x01 << tinfo->width); 158 speed *= (0x01 << tinfo->width);
@@ -167,6 +210,7 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
167 u_int target_offset) 210 u_int target_offset)
168{ 211{
169 struct ahd_linux_target *targ; 212 struct ahd_linux_target *targ;
213 struct scsi_target *starget;
170 struct ahd_initiator_tinfo *tinfo; 214 struct ahd_initiator_tinfo *tinfo;
171 struct ahd_tmode_tstate *tstate; 215 struct ahd_tmode_tstate *tstate;
172 int lun; 216 int lun;
@@ -176,20 +220,20 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
176 copy_info(info, "Target %d Negotiation Settings\n", target_id); 220 copy_info(info, "Target %d Negotiation Settings\n", target_id);
177 copy_info(info, "\tUser: "); 221 copy_info(info, "\tUser: ");
178 ahd_format_transinfo(info, &tinfo->user); 222 ahd_format_transinfo(info, &tinfo->user);
179 targ = ahd->platform_data->targets[target_offset]; 223 starget = ahd->platform_data->starget[target_offset];
180 if (targ == NULL) 224 if (starget == NULL)
181 return; 225 return;
226 targ = scsi_transport_target_data(starget);
182 227
183 copy_info(info, "\tGoal: "); 228 copy_info(info, "\tGoal: ");
184 ahd_format_transinfo(info, &tinfo->goal); 229 ahd_format_transinfo(info, &tinfo->goal);
185 copy_info(info, "\tCurr: "); 230 copy_info(info, "\tCurr: ");
186 ahd_format_transinfo(info, &tinfo->curr); 231 ahd_format_transinfo(info, &tinfo->curr);
187 copy_info(info, "\tTransmission Errors %ld\n", targ->errors_detected);
188 232
189 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 233 for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
190 struct ahd_linux_device *dev; 234 struct scsi_device *dev;
191 235
192 dev = targ->devices[lun]; 236 dev = targ->sdev[lun];
193 237
194 if (dev == NULL) 238 if (dev == NULL)
195 continue; 239 continue;
@@ -199,10 +243,13 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
199} 243}
200 244
201static void 245static void
202ahd_dump_device_state(struct info_str *info, struct ahd_linux_device *dev) 246ahd_dump_device_state(struct info_str *info, struct scsi_device *sdev)
203{ 247{
248 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
249
204 copy_info(info, "\tChannel %c Target %d Lun %d Settings\n", 250 copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
205 dev->target->channel + 'A', dev->target->target, dev->lun); 251 sdev->sdev_target->channel + 'A',
252 sdev->sdev_target->id, sdev->lun);
206 253
207 copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued); 254 copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
208 copy_info(info, "\t\tCommands Active %d\n", dev->active); 255 copy_info(info, "\t\tCommands Active %d\n", dev->active);
@@ -278,36 +325,16 @@ done:
278 * Return information to handle /proc support for the driver. 325 * Return information to handle /proc support for the driver.
279 */ 326 */
280int 327int
281#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
282ahd_linux_proc_info(char *buffer, char **start, off_t offset,
283 int length, int hostno, int inout)
284#else
285ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start, 328ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
286 off_t offset, int length, int inout) 329 off_t offset, int length, int inout)
287#endif
288{ 330{
289 struct ahd_softc *ahd; 331 struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
290 struct info_str info; 332 struct info_str info;
291 char ahd_info[256]; 333 char ahd_info[256];
292 u_long l;
293 u_int max_targ; 334 u_int max_targ;
294 u_int i; 335 u_int i;
295 int retval; 336 int retval;
296 337
297 retval = -EINVAL;
298 ahd_list_lock(&l);
299#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
300 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
301 if (ahd->platform_data->host->host_no == hostno)
302 break;
303 }
304#else
305 ahd = ahd_find_softc(*(struct ahd_softc **)shost->hostdata);
306#endif
307
308 if (ahd == NULL)
309 goto done;
310
311 /* Has data been written to the file? */ 338 /* Has data been written to the file? */
312 if (inout == TRUE) { 339 if (inout == TRUE) {
313 retval = ahd_proc_write_seeprom(ahd, buffer, length); 340 retval = ahd_proc_write_seeprom(ahd, buffer, length);
@@ -357,6 +384,5 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
357 } 384 }
358 retval = info.pos > info.offset ? info.pos - info.offset : 0; 385 retval = info.pos > info.offset ? info.pos - info.offset : 0;
359done: 386done:
360 ahd_list_unlock(&l);
361 return (retval); 387 return (retval);
362} 388}
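
The ahd_calc_syncsrate() helper added above converts a SCSI sync period factor to a frequency in kHz: factors listed in the exception table use the stored period (in hundredths of a nanosecond), while everything else falls back to the standard "factor * 4 ns" rule. A stand-alone sketch of the same arithmetic with a few worked values (0x19 is just an arbitrary non-table factor):

#include <stdio.h>

static const struct {
	unsigned int factor;
	unsigned int period;	/* in 100ths of ns, as in the patch */
} rates[] = {
	{ 0x08,  625 },		/* FAST-160 */
	{ 0x09, 1250 },		/* FAST-80 */
	{ 0x0a, 2500 },		/* FAST-40 40MHz */
	{ 0x0b, 3030 },		/* FAST-40 33MHz */
	{ 0x0c, 5000 },		/* FAST-20 */
};

static unsigned int calc_syncsrate_khz(unsigned int factor)
{
	unsigned int i;

	/* Exception table first. */
	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (factor == rates[i].factor)
			return 100000000 / rates[i].period;	/* kHz */

	/* Standard conversion: period = factor * 4 ns. */
	return 10000000 / (factor * 4 * 10);
}

int main(void)
{
	printf("0x08 -> %u kHz\n", calc_syncsrate_khz(0x08));	/* 160000 (FAST-160) */
	printf("0x0c -> %u kHz\n", calc_syncsrate_khz(0x0c));	/*  20000 (FAST-20)  */
	printf("0x19 -> %u kHz\n", calc_syncsrate_khz(0x19));	/*  10000 (FAST-10)  */
	return 0;
}
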
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 088cbc23743d..91d294c6334e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -37,7 +37,7 @@
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES. 38 * POSSIBILITY OF SUCH DAMAGES.
39 * 39 *
40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#79 $ 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $
41 * 41 *
42 * $FreeBSD$ 42 * $FreeBSD$
43 */ 43 */
@@ -243,7 +243,7 @@ typedef enum {
243 */ 243 */
244 AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA, 244 AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA,
245 AHC_AIC7860_FE = AHC_AIC7850_FE, 245 AHC_AIC7860_FE = AHC_AIC7850_FE,
246 AHC_AIC7870_FE = AHC_TARGETMODE, 246 AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE,
247 AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA, 247 AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA,
248 /* 248 /*
249 * Although we have space for both the initiator and 249 * Although we have space for both the initiator and
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index 810ec700d9fc..e196d83b93c7 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -39,7 +39,7 @@
39 * 39 *
40 * $FreeBSD$ 40 * $FreeBSD$
41 */ 41 */
42VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $" 42VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
43 43
44/* 44/*
45 * This file is processed by the aic7xxx_asm utility for use in assembling 45 * This file is processed by the aic7xxx_asm utility for use in assembling
@@ -1306,7 +1306,6 @@ scratch_ram {
1306 */ 1306 */
1307 MWI_RESIDUAL { 1307 MWI_RESIDUAL {
1308 size 1 1308 size 1
1309 alias TARG_IMMEDIATE_SCB
1310 } 1309 }
1311 /* 1310 /*
1312 * SCBID of the next SCB to be started by the controller. 1311 * SCBID of the next SCB to be started by the controller.
@@ -1461,6 +1460,7 @@ scratch_ram {
1461 */ 1460 */
1462 LAST_MSG { 1461 LAST_MSG {
1463 size 1 1462 size 1
1463 alias TARG_IMMEDIATE_SCB
1464 } 1464 }
1465 1465
1466 /* 1466 /*
diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq
index d84b741fbab5..15196390e28d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx/aic7xxx.seq
@@ -40,7 +40,7 @@
40 * $FreeBSD$ 40 * $FreeBSD$
41 */ 41 */
42 42
43VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $" 43VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $"
44PATCH_ARG_LIST = "struct ahc_softc *ahc" 44PATCH_ARG_LIST = "struct ahc_softc *ahc"
45PREFIX = "ahc_" 45PREFIX = "ahc_"
46 46
@@ -679,6 +679,7 @@ await_busfree:
679 clr SCSIBUSL; /* Prevent bit leakage during SELTO */ 679 clr SCSIBUSL; /* Prevent bit leakage during SELTO */
680 } 680 }
681 and SXFRCTL0, ~SPIOEN; 681 and SXFRCTL0, ~SPIOEN;
682 mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
682 test SSTAT1,REQINIT|BUSFREE jz .; 683 test SSTAT1,REQINIT|BUSFREE jz .;
683 test SSTAT1, BUSFREE jnz poll_for_work; 684 test SSTAT1, BUSFREE jnz poll_for_work;
684 mvi MISSED_BUSFREE call set_seqint; 685 mvi MISSED_BUSFREE call set_seqint;
@@ -1097,7 +1098,7 @@ ultra2_dmahalt:
1097 test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; 1098 test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg;
1098 if ((ahc->flags & AHC_TARGETROLE) != 0) { 1099 if ((ahc->flags & AHC_TARGETROLE) != 0) {
1099 test SSTAT0, TARGET jz dma_last_sg; 1100 test SSTAT0, TARGET jz dma_last_sg;
1100 if ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0) { 1101 if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) {
1101 test DMAPARAMS, DIRECTION jz dma_mid_sg; 1102 test DMAPARAMS, DIRECTION jz dma_mid_sg;
1102 } 1103 }
1103 } 1104 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 468d612a44f6..3cb07e114e89 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -28,9 +28,7 @@
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE. 29 * SUCH DAMAGE.
30 * 30 *
31 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#17 $ 31 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $
32 *
33 * $FreeBSD$
34 */ 32 */
35 33
36/* 34/*
@@ -64,7 +62,6 @@
64 * is preceded by an initial zero (leading 0, followed by 16-bits, MSB 62 * is preceded by an initial zero (leading 0, followed by 16-bits, MSB
65 * first). The clock cycling from low to high initiates the next data 63 * first). The clock cycling from low to high initiates the next data
66 * bit to be sent from the chip. 64 * bit to be sent from the chip.
67 *
68 */ 65 */
69 66
70#ifdef __linux__ 67#ifdef __linux__
@@ -81,14 +78,22 @@
81 * Right now, we only have to read the SEEPROM. But we make it easier to 78 * Right now, we only have to read the SEEPROM. But we make it easier to
82 * add other 93Cx6 functions. 79 * add other 93Cx6 functions.
83 */ 80 */
84static struct seeprom_cmd { 81struct seeprom_cmd {
85 uint8_t len; 82 uint8_t len;
86 uint8_t bits[9]; 83 uint8_t bits[11];
87} seeprom_read = {3, {1, 1, 0}}; 84};
88 85
86/* Short opcodes for the c46 */
89static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; 87static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
90static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; 88static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
89
90/* Long opcodes for the C56/C66 */
91static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
92static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
93
94/* Common opcodes */
91static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; 95static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
96static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
92 97
93/* 98/*
94 * Wait for the SEERDY to go high; about 800 ns. 99 * Wait for the SEERDY to go high; about 800 ns.
@@ -222,12 +227,25 @@ int
222ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, 227ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
223 u_int start_addr, u_int count) 228 u_int start_addr, u_int count)
224{ 229{
230 struct seeprom_cmd *ewen, *ewds;
225 uint16_t v; 231 uint16_t v;
226 uint8_t temp; 232 uint8_t temp;
227 int i, k; 233 int i, k;
228 234
229 /* Place the chip into write-enable mode */ 235 /* Place the chip into write-enable mode */
230 send_seeprom_cmd(sd, &seeprom_ewen); 236 if (sd->sd_chip == C46) {
237 ewen = &seeprom_ewen;
238 ewds = &seeprom_ewds;
239 } else if (sd->sd_chip == C56_66) {
240 ewen = &seeprom_long_ewen;
241 ewds = &seeprom_long_ewds;
242 } else {
243 printf("ahc_write_seeprom: unsupported seeprom type %d\n",
244 sd->sd_chip);
245 return (0);
246 }
247
248 send_seeprom_cmd(sd, ewen);
231 reset_seeprom(sd); 249 reset_seeprom(sd);
232 250
233 /* Write all requested data out to the seeprom. */ 251 /* Write all requested data out to the seeprom. */
@@ -277,7 +295,7 @@ ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
277 } 295 }
278 296
279 /* Put the chip back into write-protect mode */ 297 /* Put the chip back into write-protect mode */
280 send_seeprom_cmd(sd, &seeprom_ewds); 298 send_seeprom_cmd(sd, ewds);
281 reset_seeprom(sd); 299 reset_seeprom(sd);
282 300
283 return (1); 301 return (1);
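
The ahc_write_seeprom() change above selects the write-enable/write-disable opcode by chip type: the original 9-bit commands for a 93C46 and the new 11-bit forms for a 93C56/66, which is why seeprom_cmd.bits grew to 11 entries. A rough sketch of that selection, assuming a send_cmd() stand-in for send_seeprom_cmd() (the real routine clocks each bit out on the EEPROM's serial interface):

#include <stdio.h>
#include <stdint.h>

struct seeprom_cmd {
	uint8_t len;
	uint8_t bits[11];
};

/* Bit patterns copied from the patch; trailing entries default to zero. */
static const struct seeprom_cmd seeprom_ewen      = {  9, {1, 0, 0, 1, 1, 0, 0, 0, 0} };
static const struct seeprom_cmd seeprom_long_ewen = { 11, {1, 0, 0, 1, 1, 0, 0, 0, 0} };

enum seeprom_chip { C46, C56_66 };

static void send_cmd(const struct seeprom_cmd *cmd)
{
	int i;

	/* The driver asserts chip select and clocks one bit per iteration. */
	for (i = 0; i < cmd->len; i++)
		printf("%d", cmd->bits[i]);
	printf("  (%d clock cycles)\n", cmd->len);
}

int main(void)
{
	enum seeprom_chip chip = C56_66;	/* hypothetical example part */

	send_cmd(chip == C46 ? &seeprom_ewen : &seeprom_long_ewen);
	return 0;
}
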
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 7bc01e41bcce..58ac46103eb6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -37,9 +37,7 @@
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES. 38 * POSSIBILITY OF SUCH DAMAGES.
39 * 39 *
40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $ 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
41 *
42 * $FreeBSD$
43 */ 41 */
44 42
45#ifdef __linux__ 43#ifdef __linux__
@@ -287,10 +285,19 @@ ahc_restart(struct ahc_softc *ahc)
287 ahc_outb(ahc, SEQ_FLAGS2, 285 ahc_outb(ahc, SEQ_FLAGS2,
288 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 286 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
289 } 287 }
288
289 /*
290 * Clear any pending sequencer interrupt. It is no
291 * longer relevant since we're resetting the Program
292 * Counter.
293 */
294 ahc_outb(ahc, CLRINT, CLRSEQINT);
295
290 ahc_outb(ahc, MWI_RESIDUAL, 0); 296 ahc_outb(ahc, MWI_RESIDUAL, 0);
291 ahc_outb(ahc, SEQCTL, ahc->seqctl); 297 ahc_outb(ahc, SEQCTL, ahc->seqctl);
292 ahc_outb(ahc, SEQADDR0, 0); 298 ahc_outb(ahc, SEQADDR0, 0);
293 ahc_outb(ahc, SEQADDR1, 0); 299 ahc_outb(ahc, SEQADDR1, 0);
300
294 ahc_unpause(ahc); 301 ahc_unpause(ahc);
295} 302}
296 303
@@ -1174,19 +1181,20 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1174 scb_index); 1181 scb_index);
1175 } 1182 }
1176#endif 1183#endif
1177 /*
1178 * Force a renegotiation with this target just in
1179 * case the cable was pulled and will later be
1180 * re-attached. The target may forget its negotiation
1181 * settings with us should it attempt to reselect
1182 * during the interruption. The target will not issue
1183 * a unit attention in this case, so we must always
1184 * renegotiate.
1185 */
1186 ahc_scb_devinfo(ahc, &devinfo, scb); 1184 ahc_scb_devinfo(ahc, &devinfo, scb);
1187 ahc_force_renegotiation(ahc, &devinfo);
1188 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1185 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1189 ahc_freeze_devq(ahc, scb); 1186 ahc_freeze_devq(ahc, scb);
1187
1188 /*
1189 * Cancel any pending transactions on the device
1190 * now that it seems to be missing. This will
1191 * also revert us to async/narrow transfers until
1192 * we can renegotiate with the device.
1193 */
1194 ahc_handle_devreset(ahc, &devinfo,
1195 CAM_SEL_TIMEOUT,
1196 "Selection Timeout",
1197 /*verbose_level*/1);
1190 } 1198 }
1191 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1199 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1192 ahc_restart(ahc); 1200 ahc_restart(ahc);
@@ -3763,8 +3771,9 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3763 /*period*/0, /*offset*/0, /*ppr_options*/0, 3771 /*period*/0, /*offset*/0, /*ppr_options*/0,
3764 AHC_TRANS_CUR, /*paused*/TRUE); 3772 AHC_TRANS_CUR, /*paused*/TRUE);
3765 3773
3766 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3774 if (status != CAM_SEL_TIMEOUT)
3767 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3775 ahc_send_async(ahc, devinfo->channel, devinfo->target,
3776 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
3768 3777
3769 if (message != NULL 3778 if (message != NULL
3770 && (verbose_level <= bootverbose)) 3779 && (verbose_level <= bootverbose))
@@ -4003,14 +4012,6 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
4003 * to disturb the integrity of the bus. 4012 * to disturb the integrity of the bus.
4004 */ 4013 */
4005 ahc_pause(ahc); 4014 ahc_pause(ahc);
4006 if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) {
4007 /*
4008 * The chip has not been initialized since
4009 * PCI/EISA/VLB bus reset. Don't trust
4010 * "left over BIOS data".
4011 */
4012 ahc->flags |= AHC_NO_BIOS_INIT;
4013 }
4014 sxfrctl1_b = 0; 4015 sxfrctl1_b = 0;
4015 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4016 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
4016 u_int sblkctl; 4017 u_int sblkctl;
@@ -5036,14 +5037,23 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
5036 ahc->flags |= AHC_ALL_INTERRUPTS; 5037 ahc->flags |= AHC_ALL_INTERRUPTS;
5037 paused = FALSE; 5038 paused = FALSE;
5038 do { 5039 do {
5039 if (paused) 5040 if (paused) {
5040 ahc_unpause(ahc); 5041 ahc_unpause(ahc);
5042 /*
5043 * Give the sequencer some time to service
5044 * any active selections.
5045 */
5046 ahc_delay(500);
5047 }
5041 ahc_intr(ahc); 5048 ahc_intr(ahc);
5042 ahc_pause(ahc); 5049 ahc_pause(ahc);
5043 paused = TRUE; 5050 paused = TRUE;
5044 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 5051 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
5045 ahc_clear_critical_section(ahc);
5046 intstat = ahc_inb(ahc, INTSTAT); 5052 intstat = ahc_inb(ahc, INTSTAT);
5053 if ((intstat & INT_PEND) == 0) {
5054 ahc_clear_critical_section(ahc);
5055 intstat = ahc_inb(ahc, INTSTAT);
5056 }
5047 } while (--maxloops 5057 } while (--maxloops
5048 && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) 5058 && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
5049 && ((intstat & INT_PEND) != 0 5059 && ((intstat & INT_PEND) != 0
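
The reworked ahc_pause_and_flushwork() loop above unpauses the chip, waits 500 microseconds so the sequencer can service any active selection, runs the interrupt handler, and only single-steps out of critical sections once no interrupt is pending. A simplified stand-alone sketch of that control flow; all hardware access is faked so the example runs, INT_PEND is a placeholder rather than the driver's real mask, and the exit condition is reduced to the pending-interrupt test:

#include <stdio.h>

#define INT_PEND 0x0f	/* placeholder pending-interrupt mask */

static int fake_intstat = 0x01;	/* pretend one interrupt is pending at entry */

static void unpause(void)                { }
static void delay_us(int us)             { (void)us; }
static void service_interrupts(void)     { fake_intstat = 0; }	/* "handle" it */
static void pause_chip(void)             { }
static void clear_critical_section(void) { }
static int  read_intstat(void)           { return fake_intstat; }

int main(void)
{
	int maxloops = 1000;
	int paused = 0;
	int intstat;

	do {
		if (paused) {
			unpause();
			/* Give the sequencer time to service active selections. */
			delay_us(500);
		}
		service_interrupts();
		pause_chip();
		paused = 1;
		intstat = read_intstat();
		if ((intstat & INT_PEND) == 0) {
			/* Nothing pending: safe to step out of a critical section. */
			clear_critical_section();
			intstat = read_intstat();
		}
	} while (--maxloops && (intstat & INT_PEND) != 0);

	printf("flushed; %d loop iterations left\n", maxloops);
	return 0;
}
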
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 687f19e9cf03..c932b3b94490 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -125,12 +125,6 @@
125 125
126static struct scsi_transport_template *ahc_linux_transport_template = NULL; 126static struct scsi_transport_template *ahc_linux_transport_template = NULL;
127 127
128/*
129 * Include aiclib.c as part of our
130 * "module dependencies are hard" work around.
131 */
132#include "aiclib.c"
133
134#include <linux/init.h> /* __setup */ 128#include <linux/init.h> /* __setup */
135#include <linux/mm.h> /* For fetching system memory size */ 129#include <linux/mm.h> /* For fetching system memory size */
136#include <linux/blkdev.h> /* For block_size() */ 130#include <linux/blkdev.h> /* For block_size() */
@@ -391,7 +385,6 @@ static int ahc_linux_run_command(struct ahc_softc*,
391 struct ahc_linux_device *, 385 struct ahc_linux_device *,
392 struct scsi_cmnd *); 386 struct scsi_cmnd *);
393static void ahc_linux_setup_tag_info_global(char *p); 387static void ahc_linux_setup_tag_info_global(char *p);
394static aic_option_callback_t ahc_linux_setup_tag_info;
395static int aic7xxx_setup(char *s); 388static int aic7xxx_setup(char *s);
396 389
397static int ahc_linux_unit; 390static int ahc_linux_unit;
@@ -635,6 +628,8 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
635 628
636 targ->sdev[sdev->lun] = sdev; 629 targ->sdev[sdev->lun] = sdev;
637 630
631 spi_period(starget) = 0;
632
638 return 0; 633 return 0;
639} 634}
640 635
@@ -918,6 +913,86 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
918 } 913 }
919} 914}
920 915
916static char *
917ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
918 void (*callback)(u_long, int, int, int32_t),
919 u_long callback_arg)
920{
921 char *tok_end;
922 char *tok_end2;
923 int i;
924 int instance;
925 int targ;
926 int done;
927 char tok_list[] = {'.', ',', '{', '}', '\0'};
928
929 /* All options use a ':' name/arg separator */
930 if (*opt_arg != ':')
931 return (opt_arg);
932 opt_arg++;
933 instance = -1;
934 targ = -1;
935 done = FALSE;
936 /*
937 * Restore separator that may be in
938 * the middle of our option argument.
939 */
940 tok_end = strchr(opt_arg, '\0');
941 if (tok_end < end)
942 *tok_end = ',';
943 while (!done) {
944 switch (*opt_arg) {
945 case '{':
946 if (instance == -1) {
947 instance = 0;
948 } else {
949 if (depth > 1) {
950 if (targ == -1)
951 targ = 0;
952 } else {
953 printf("Malformed Option %s\n",
954 opt_name);
955 done = TRUE;
956 }
957 }
958 opt_arg++;
959 break;
960 case '}':
961 if (targ != -1)
962 targ = -1;
963 else if (instance != -1)
964 instance = -1;
965 opt_arg++;
966 break;
967 case ',':
968 case '.':
969 if (instance == -1)
970 done = TRUE;
971 else if (targ >= 0)
972 targ++;
973 else if (instance >= 0)
974 instance++;
975 opt_arg++;
976 break;
977 case '\0':
978 done = TRUE;
979 break;
980 default:
981 tok_end = end;
982 for (i = 0; tok_list[i]; i++) {
983 tok_end2 = strchr(opt_arg, tok_list[i]);
984 if ((tok_end2) && (tok_end2 < tok_end))
985 tok_end = tok_end2;
986 }
987 callback(callback_arg, instance, targ,
988 simple_strtol(opt_arg, NULL, 0));
989 opt_arg = tok_end;
990 break;
991 }
992 }
993 return (opt_arg);
994}
995
921/* 996/*
922 * Handle Linux boot parameters. This routine allows for assigning a value 997 * Handle Linux boot parameters. This routine allows for assigning a value
923 * to a parameter with a ':' between the parameter and the value. 998 * to a parameter with a ':' between the parameter and the value.
@@ -972,7 +1047,7 @@ aic7xxx_setup(char *s)
972 if (strncmp(p, "global_tag_depth", n) == 0) { 1047 if (strncmp(p, "global_tag_depth", n) == 0) {
973 ahc_linux_setup_tag_info_global(p + n); 1048 ahc_linux_setup_tag_info_global(p + n);
974 } else if (strncmp(p, "tag_info", n) == 0) { 1049 } else if (strncmp(p, "tag_info", n) == 0) {
975 s = aic_parse_brace_option("tag_info", p + n, end, 1050 s = ahc_parse_brace_option("tag_info", p + n, end,
976 2, ahc_linux_setup_tag_info, 0); 1051 2, ahc_linux_setup_tag_info, 0);
977 } else if (p[n] == ':') { 1052 } else if (p[n] == ':') {
978 *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); 1053 *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
@@ -1612,9 +1687,9 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
1612 if (channel == 'B') 1687 if (channel == 'B')
1613 target_offset += 8; 1688 target_offset += 8;
1614 starget = ahc->platform_data->starget[target_offset]; 1689 starget = ahc->platform_data->starget[target_offset];
1615 targ = scsi_transport_target_data(starget); 1690 if (starget == NULL)
1616 if (targ == NULL)
1617 break; 1691 break;
1692 targ = scsi_transport_target_data(starget);
1618 1693
1619 target_ppr_options = 1694 target_ppr_options =
1620 (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) 1695 (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
@@ -2329,8 +2404,6 @@ ahc_platform_dump_card_state(struct ahc_softc *ahc)
2329{ 2404{
2330} 2405}
2331 2406
2332static void ahc_linux_exit(void);
2333
2334static void ahc_linux_set_width(struct scsi_target *starget, int width) 2407static void ahc_linux_set_width(struct scsi_target *starget, int width)
2335{ 2408{
2336 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2409 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
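
With aiclib.c no longer compiled into the OSM, the option parser is now the local ahc_parse_brace_option(). It walks boot arguments of the form aic7xxx=tag_info:{{16,16.64},{8}}: outer braces select the controller instance, inner braces the target, ',' and '.' both advance to the next index, and each number is passed to the callback as (instance, target, value). A stand-alone sketch of that walk for one sample string, simplified to drop the separator restoration and malformed-input handling of the real parser:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static void callback(int instance, int targ, long value)
{
	printf("instance %d target %d value %ld\n", instance, targ, value);
}

int main(void)
{
	const char *s = "{{16,16.64},{8}}";
	int instance = -1;
	int targ = -1;

	while (*s != '\0') {
		if (*s == '{') {
			if (instance == -1)
				instance = 0;	/* entering the per-instance list */
			else
				targ = 0;	/* entering a per-target list */
			s++;
		} else if (*s == '}') {
			if (targ != -1)
				targ = -1;
			else
				instance = -1;
			s++;
		} else if (*s == ',' || *s == '.') {
			if (targ >= 0)
				targ++;
			else if (instance >= 0)
				instance++;
			s++;
		} else if (isdigit((unsigned char)*s)) {
			char *end;

			callback(instance, targ, strtol(s, &end, 0));
			s = end;
		} else {
			s++;	/* ignore anything else */
		}
	}
	return 0;
}

For the sample string the callback fires with (0,0,16), (0,1,16), (0,2,64) and (1,0,8), which is how per-controller, per-target tag depths get assigned.
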
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 0e47ac217549..c52996269240 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -265,7 +265,7 @@ ahc_scb_timer_reset(struct scb *scb, u_int usec)
265/***************************** SMP support ************************************/ 265/***************************** SMP support ************************************/
266#include <linux/spinlock.h> 266#include <linux/spinlock.h>
267 267
268#define AIC7XXX_DRIVER_VERSION "6.2.36" 268#define AIC7XXX_DRIVER_VERSION "7.0"
269 269
270/*************************** Device Data Structures ***************************/ 270/*************************** Device Data Structures ***************************/
271/* 271/*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 9d318ce2c993..0d44a6907dd2 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -149,6 +149,27 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
149 ahc_free(ahc); 149 ahc_free(ahc);
150} 150}
151 151
152static void
153ahc_linux_pci_inherit_flags(struct ahc_softc *ahc)
154{
155 struct pci_dev *pdev = ahc->dev_softc, *master_pdev;
156 unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
157
158 master_pdev = pci_get_slot(pdev->bus, master_devfn);
159 if (master_pdev) {
160 struct ahc_softc *master = pci_get_drvdata(master_pdev);
161 if (master) {
162 ahc->flags &= ~AHC_BIOS_ENABLED;
163 ahc->flags |= master->flags & AHC_BIOS_ENABLED;
164
165 ahc->flags &= ~AHC_PRIMARY_CHANNEL;
166 ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL;
167 } else
168 printk(KERN_ERR "aic7xxx: no multichannel peer found!\n");
169 pci_dev_put(master_pdev);
170 }
171}
172
152static int 173static int
153ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 174ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
154{ 175{
@@ -203,6 +224,14 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
203 ahc_free(ahc); 224 ahc_free(ahc);
204 return (-error); 225 return (-error);
205 } 226 }
227
228 /*
229 * Second Function PCI devices need to inherit some
230 * settings from function 0.
231 */
232 if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
233 ahc_linux_pci_inherit_flags(ahc);
234
206 pci_set_drvdata(pdev, ahc); 235 pci_set_drvdata(pdev, ahc);
207 ahc_linux_register_host(ahc, &aic7xxx_driver_template); 236 ahc_linux_register_host(ahc, &aic7xxx_driver_template);
208 return (0); 237 return (0);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 3802c91f0b07..04a3506cf340 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -54,6 +54,49 @@ static void ahc_dump_device_state(struct info_str *info,
54static int ahc_proc_write_seeprom(struct ahc_softc *ahc, 54static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
55 char *buffer, int length); 55 char *buffer, int length);
56 56
57/*
58 * Table of syncrates that don't follow the "divisible by 4"
59 * rule. This table will be expanded in future SCSI specs.
60 */
61static struct {
62 u_int period_factor;
63 u_int period; /* in 100ths of ns */
64} scsi_syncrates[] = {
65 { 0x08, 625 }, /* FAST-160 */
66 { 0x09, 1250 }, /* FAST-80 */
67 { 0x0a, 2500 }, /* FAST-40 40MHz */
68 { 0x0b, 3030 }, /* FAST-40 33MHz */
69 { 0x0c, 5000 } /* FAST-20 */
70};
71
72/*
73 * Return the frequency in kHz corresponding to the given
74 * sync period factor.
75 */
76static u_int
77ahc_calc_syncsrate(u_int period_factor)
78{
79 int i;
80 int num_syncrates;
81
82 num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
83 /* See if the period is in the "exception" table */
84 for (i = 0; i < num_syncrates; i++) {
85
86 if (period_factor == scsi_syncrates[i].period_factor) {
87 /* Period in kHz */
88 return (100000000 / scsi_syncrates[i].period);
89 }
90 }
91
92 /*
93 * Wasn't in the table, so use the standard
94 * 4 times conversion.
95 */
96 return (10000000 / (period_factor * 4 * 10));
97}
98
99
57static void 100static void
58copy_mem_info(struct info_str *info, char *data, int len) 101copy_mem_info(struct info_str *info, char *data, int len)
59{ 102{
@@ -106,7 +149,7 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
106 speed = 3300; 149 speed = 3300;
107 freq = 0; 150 freq = 0;
108 if (tinfo->offset != 0) { 151 if (tinfo->offset != 0) {
109 freq = aic_calc_syncsrate(tinfo->period); 152 freq = ahc_calc_syncsrate(tinfo->period);
110 speed = freq; 153 speed = freq;
111 } 154 }
112 speed *= (0x01 << tinfo->width); 155 speed *= (0x01 << tinfo->width);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index 7c1390ed1179..2ce1febca207 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -2,8 +2,8 @@
2 * DO NOT EDIT - This file is automatically generated 2 * DO NOT EDIT - This file is automatically generated
3 * from the following source files: 3 * from the following source files:
4 * 4 *
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8typedef int (ahc_reg_print_t)(u_int, u_int *, u_int); 8typedef int (ahc_reg_print_t)(u_int, u_int *, u_int);
9typedef struct ahc_reg_parse_entry { 9typedef struct ahc_reg_parse_entry {
@@ -1298,7 +1298,6 @@ ahc_reg_print_t ahc_sg_cache_pre_print;
1298#define CMDSIZE_TABLE_TAIL 0x34 1298#define CMDSIZE_TABLE_TAIL 0x34
1299 1299
1300#define MWI_RESIDUAL 0x38 1300#define MWI_RESIDUAL 0x38
1301#define TARG_IMMEDIATE_SCB 0x38
1302 1301
1303#define NEXT_QUEUED_SCB 0x39 1302#define NEXT_QUEUED_SCB 0x39
1304 1303
@@ -1380,6 +1379,7 @@ ahc_reg_print_t ahc_sg_cache_pre_print;
1380#define RETURN_2 0x52 1379#define RETURN_2 0x52
1381 1380
1382#define LAST_MSG 0x53 1381#define LAST_MSG 0x53
1382#define TARG_IMMEDIATE_SCB 0x53
1383 1383
1384#define SCSISEQ_TEMPLATE 0x54 1384#define SCSISEQ_TEMPLATE 0x54
1385#define ENSELO 0x40 1385#define ENSELO 0x40
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 9c713775d44a..88bfd767c51c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -2,8 +2,8 @@
2 * DO NOT EDIT - This file is automatically generated 2 * DO NOT EDIT - This file is automatically generated
3 * from the following source files: 3 * from the following source files:
4 * 4 *
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8 8
9#include "aic7xxx_osm.h" 9#include "aic7xxx_osm.h"
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
index cf411368a871..4cee08521e75 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -2,13 +2,13 @@
2 * DO NOT EDIT - This file is automatically generated 2 * DO NOT EDIT - This file is automatically generated
3 * from the following source files: 3 * from the following source files:
4 * 4 *
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8static uint8_t seqprog[] = { 8static uint8_t seqprog[] = {
9 0xb2, 0x00, 0x00, 0x08, 9 0xb2, 0x00, 0x00, 0x08,
10 0xf7, 0x11, 0x22, 0x08, 10 0xf7, 0x11, 0x22, 0x08,
11 0x00, 0x65, 0xec, 0x59, 11 0x00, 0x65, 0xee, 0x59,
12 0xf7, 0x01, 0x02, 0x08, 12 0xf7, 0x01, 0x02, 0x08,
13 0xff, 0x6a, 0x24, 0x08, 13 0xff, 0x6a, 0x24, 0x08,
14 0x40, 0x00, 0x40, 0x68, 14 0x40, 0x00, 0x40, 0x68,
@@ -21,15 +21,15 @@ static uint8_t seqprog[] = {
21 0x01, 0x4d, 0xc8, 0x30, 21 0x01, 0x4d, 0xc8, 0x30,
22 0x00, 0x4c, 0x12, 0x70, 22 0x00, 0x4c, 0x12, 0x70,
23 0x01, 0x39, 0xa2, 0x30, 23 0x01, 0x39, 0xa2, 0x30,
24 0x00, 0x6a, 0xc0, 0x5e, 24 0x00, 0x6a, 0xc2, 0x5e,
25 0x01, 0x51, 0x20, 0x31, 25 0x01, 0x51, 0x20, 0x31,
26 0x01, 0x57, 0xae, 0x00, 26 0x01, 0x57, 0xae, 0x00,
27 0x0d, 0x6a, 0x76, 0x00, 27 0x0d, 0x6a, 0x76, 0x00,
28 0x00, 0x51, 0x12, 0x5e, 28 0x00, 0x51, 0x14, 0x5e,
29 0x01, 0x51, 0xc8, 0x30, 29 0x01, 0x51, 0xc8, 0x30,
30 0x00, 0x39, 0xc8, 0x60, 30 0x00, 0x39, 0xc8, 0x60,
31 0x00, 0xbb, 0x30, 0x70, 31 0x00, 0xbb, 0x30, 0x70,
32 0xc1, 0x6a, 0xd8, 0x5e, 32 0xc1, 0x6a, 0xda, 0x5e,
33 0x01, 0xbf, 0x72, 0x30, 33 0x01, 0xbf, 0x72, 0x30,
34 0x01, 0x40, 0x7e, 0x31, 34 0x01, 0x40, 0x7e, 0x31,
35 0x01, 0x90, 0x80, 0x30, 35 0x01, 0x90, 0x80, 0x30,
@@ -49,10 +49,10 @@ static uint8_t seqprog[] = {
49 0x08, 0x6a, 0x78, 0x00, 49 0x08, 0x6a, 0x78, 0x00,
50 0x01, 0x50, 0xc8, 0x30, 50 0x01, 0x50, 0xc8, 0x30,
51 0xe0, 0x6a, 0xcc, 0x00, 51 0xe0, 0x6a, 0xcc, 0x00,
52 0x48, 0x6a, 0xfc, 0x5d, 52 0x48, 0x6a, 0xfe, 0x5d,
53 0x01, 0x6a, 0xdc, 0x01, 53 0x01, 0x6a, 0xdc, 0x01,
54 0x88, 0x6a, 0xcc, 0x00, 54 0x88, 0x6a, 0xcc, 0x00,
55 0x48, 0x6a, 0xfc, 0x5d, 55 0x48, 0x6a, 0xfe, 0x5d,
56 0x01, 0x6a, 0x26, 0x01, 56 0x01, 0x6a, 0x26, 0x01,
57 0xf0, 0x19, 0x7a, 0x08, 57 0xf0, 0x19, 0x7a, 0x08,
58 0x0f, 0x18, 0xc8, 0x08, 58 0x0f, 0x18, 0xc8, 0x08,
@@ -93,7 +93,7 @@ static uint8_t seqprog[] = {
93 0x00, 0x65, 0x20, 0x41, 93 0x00, 0x65, 0x20, 0x41,
94 0x02, 0x57, 0xae, 0x00, 94 0x02, 0x57, 0xae, 0x00,
95 0x00, 0x65, 0x9e, 0x40, 95 0x00, 0x65, 0x9e, 0x40,
96 0x61, 0x6a, 0xd8, 0x5e, 96 0x61, 0x6a, 0xda, 0x5e,
97 0x08, 0x51, 0x20, 0x71, 97 0x08, 0x51, 0x20, 0x71,
98 0x02, 0x0b, 0xb2, 0x78, 98 0x02, 0x0b, 0xb2, 0x78,
99 0x00, 0x65, 0xae, 0x40, 99 0x00, 0x65, 0xae, 0x40,
@@ -106,7 +106,7 @@ static uint8_t seqprog[] = {
106 0x80, 0x3d, 0x7a, 0x00, 106 0x80, 0x3d, 0x7a, 0x00,
107 0x20, 0x6a, 0x16, 0x00, 107 0x20, 0x6a, 0x16, 0x00,
108 0x00, 0x65, 0xcc, 0x41, 108 0x00, 0x65, 0xcc, 0x41,
109 0x00, 0x65, 0xb2, 0x5e, 109 0x00, 0x65, 0xb4, 0x5e,
110 0x00, 0x65, 0x12, 0x40, 110 0x00, 0x65, 0x12, 0x40,
111 0x20, 0x11, 0xd2, 0x68, 111 0x20, 0x11, 0xd2, 0x68,
112 0x20, 0x6a, 0x18, 0x00, 112 0x20, 0x6a, 0x18, 0x00,
@@ -140,27 +140,27 @@ static uint8_t seqprog[] = {
140 0x80, 0x0b, 0xc4, 0x79, 140 0x80, 0x0b, 0xc4, 0x79,
141 0x12, 0x01, 0x02, 0x00, 141 0x12, 0x01, 0x02, 0x00,
142 0x01, 0xab, 0xac, 0x30, 142 0x01, 0xab, 0xac, 0x30,
143 0xe4, 0x6a, 0x6e, 0x5d, 143 0xe4, 0x6a, 0x70, 0x5d,
144 0x40, 0x6a, 0x16, 0x00, 144 0x40, 0x6a, 0x16, 0x00,
145 0x80, 0x3e, 0x84, 0x5d, 145 0x80, 0x3e, 0x86, 0x5d,
146 0x20, 0xb8, 0x18, 0x79, 146 0x20, 0xb8, 0x18, 0x79,
147 0x20, 0x6a, 0x84, 0x5d, 147 0x20, 0x6a, 0x86, 0x5d,
148 0x00, 0xab, 0x84, 0x5d, 148 0x00, 0xab, 0x86, 0x5d,
149 0x01, 0xa9, 0x78, 0x30, 149 0x01, 0xa9, 0x78, 0x30,
150 0x10, 0xb8, 0x20, 0x79, 150 0x10, 0xb8, 0x20, 0x79,
151 0xe4, 0x6a, 0x6e, 0x5d, 151 0xe4, 0x6a, 0x70, 0x5d,
152 0x00, 0x65, 0xae, 0x40, 152 0x00, 0x65, 0xae, 0x40,
153 0x10, 0x03, 0x3c, 0x69, 153 0x10, 0x03, 0x3c, 0x69,
154 0x08, 0x3c, 0x5a, 0x69, 154 0x08, 0x3c, 0x5a, 0x69,
155 0x04, 0x3c, 0x92, 0x69, 155 0x04, 0x3c, 0x92, 0x69,
156 0x02, 0x3c, 0x98, 0x69, 156 0x02, 0x3c, 0x98, 0x69,
157 0x01, 0x3c, 0x44, 0x79, 157 0x01, 0x3c, 0x44, 0x79,
158 0xff, 0x6a, 0x70, 0x00, 158 0xff, 0x6a, 0xa6, 0x00,
159 0x00, 0x65, 0xa4, 0x59, 159 0x00, 0x65, 0xa4, 0x59,
160 0x00, 0x6a, 0xc0, 0x5e, 160 0x00, 0x6a, 0xc2, 0x5e,
161 0xff, 0x38, 0x30, 0x71, 161 0xff, 0x53, 0x30, 0x71,
162 0x0d, 0x6a, 0x76, 0x00, 162 0x0d, 0x6a, 0x76, 0x00,
163 0x00, 0x38, 0x12, 0x5e, 163 0x00, 0x53, 0x14, 0x5e,
164 0x00, 0x65, 0xea, 0x58, 164 0x00, 0x65, 0xea, 0x58,
165 0x12, 0x01, 0x02, 0x00, 165 0x12, 0x01, 0x02, 0x00,
166 0x00, 0x65, 0x18, 0x41, 166 0x00, 0x65, 0x18, 0x41,
@@ -168,10 +168,10 @@ static uint8_t seqprog[] = {
168 0x00, 0x65, 0xf2, 0x58, 168 0x00, 0x65, 0xf2, 0x58,
169 0xfd, 0x57, 0xae, 0x08, 169 0xfd, 0x57, 0xae, 0x08,
170 0x00, 0x65, 0xae, 0x40, 170 0x00, 0x65, 0xae, 0x40,
171 0xe4, 0x6a, 0x6e, 0x5d, 171 0xe4, 0x6a, 0x70, 0x5d,
172 0x20, 0x3c, 0x4a, 0x79, 172 0x20, 0x3c, 0x4a, 0x79,
173 0x02, 0x6a, 0x84, 0x5d, 173 0x02, 0x6a, 0x86, 0x5d,
174 0x04, 0x6a, 0x84, 0x5d, 174 0x04, 0x6a, 0x86, 0x5d,
175 0x01, 0x03, 0x4c, 0x69, 175 0x01, 0x03, 0x4c, 0x69,
176 0xf7, 0x11, 0x22, 0x08, 176 0xf7, 0x11, 0x22, 0x08,
177 0xff, 0x6a, 0x24, 0x08, 177 0xff, 0x6a, 0x24, 0x08,
@@ -182,13 +182,13 @@ static uint8_t seqprog[] = {
182 0x80, 0x86, 0xc8, 0x08, 182 0x80, 0x86, 0xc8, 0x08,
183 0x01, 0x4f, 0xc8, 0x30, 183 0x01, 0x4f, 0xc8, 0x30,
184 0x00, 0x50, 0x6c, 0x61, 184 0x00, 0x50, 0x6c, 0x61,
185 0xc4, 0x6a, 0x6e, 0x5d, 185 0xc4, 0x6a, 0x70, 0x5d,
186 0x40, 0x3c, 0x68, 0x79, 186 0x40, 0x3c, 0x68, 0x79,
187 0x28, 0x6a, 0x84, 0x5d, 187 0x28, 0x6a, 0x86, 0x5d,
188 0x00, 0x65, 0x4c, 0x41, 188 0x00, 0x65, 0x4c, 0x41,
189 0x08, 0x6a, 0x84, 0x5d, 189 0x08, 0x6a, 0x86, 0x5d,
190 0x00, 0x65, 0x4c, 0x41, 190 0x00, 0x65, 0x4c, 0x41,
191 0x84, 0x6a, 0x6e, 0x5d, 191 0x84, 0x6a, 0x70, 0x5d,
192 0x00, 0x65, 0xf2, 0x58, 192 0x00, 0x65, 0xf2, 0x58,
193 0x01, 0x66, 0xc8, 0x30, 193 0x01, 0x66, 0xc8, 0x30,
194 0x01, 0x64, 0xd8, 0x31, 194 0x01, 0x64, 0xd8, 0x31,
@@ -208,16 +208,16 @@ static uint8_t seqprog[] = {
208 0xf7, 0x3c, 0x78, 0x08, 208 0xf7, 0x3c, 0x78, 0x08,
209 0x00, 0x65, 0x20, 0x41, 209 0x00, 0x65, 0x20, 0x41,
210 0x40, 0xaa, 0x7e, 0x10, 210 0x40, 0xaa, 0x7e, 0x10,
211 0x04, 0xaa, 0x6e, 0x5d, 211 0x04, 0xaa, 0x70, 0x5d,
212 0x00, 0x65, 0x56, 0x42, 212 0x00, 0x65, 0x58, 0x42,
213 0xc4, 0x6a, 0x6e, 0x5d, 213 0xc4, 0x6a, 0x70, 0x5d,
214 0xc0, 0x6a, 0x7e, 0x00, 214 0xc0, 0x6a, 0x7e, 0x00,
215 0x00, 0xa8, 0x84, 0x5d, 215 0x00, 0xa8, 0x86, 0x5d,
216 0xe4, 0x6a, 0x06, 0x00, 216 0xe4, 0x6a, 0x06, 0x00,
217 0x00, 0x6a, 0x84, 0x5d, 217 0x00, 0x6a, 0x86, 0x5d,
218 0x00, 0x65, 0x4c, 0x41, 218 0x00, 0x65, 0x4c, 0x41,
219 0x10, 0x3c, 0xa8, 0x69, 219 0x10, 0x3c, 0xa8, 0x69,
220 0x00, 0xbb, 0x8a, 0x44, 220 0x00, 0xbb, 0x8c, 0x44,
221 0x18, 0x6a, 0xda, 0x01, 221 0x18, 0x6a, 0xda, 0x01,
222 0x01, 0x69, 0xd8, 0x31, 222 0x01, 0x69, 0xd8, 0x31,
223 0x1c, 0x6a, 0xd0, 0x01, 223 0x1c, 0x6a, 0xd0, 0x01,
@@ -227,31 +227,32 @@ static uint8_t seqprog[] = {
227 0x01, 0x93, 0x26, 0x01, 227 0x01, 0x93, 0x26, 0x01,
228 0x03, 0x6a, 0x2a, 0x01, 228 0x03, 0x6a, 0x2a, 0x01,
229 0x01, 0x69, 0x32, 0x31, 229 0x01, 0x69, 0x32, 0x31,
230 0x1c, 0x6a, 0xe0, 0x5d, 230 0x1c, 0x6a, 0xe2, 0x5d,
231 0x0a, 0x93, 0x26, 0x01, 231 0x0a, 0x93, 0x26, 0x01,
232 0x00, 0x65, 0xa8, 0x5e, 232 0x00, 0x65, 0xaa, 0x5e,
233 0x01, 0x50, 0xa0, 0x18, 233 0x01, 0x50, 0xa0, 0x18,
234 0x02, 0x6a, 0x22, 0x05, 234 0x02, 0x6a, 0x22, 0x05,
235 0x1a, 0x01, 0x02, 0x00, 235 0x1a, 0x01, 0x02, 0x00,
236 0x80, 0x6a, 0x74, 0x00, 236 0x80, 0x6a, 0x74, 0x00,
237 0x40, 0x6a, 0x78, 0x00, 237 0x40, 0x6a, 0x78, 0x00,
238 0x40, 0x6a, 0x16, 0x00, 238 0x40, 0x6a, 0x16, 0x00,
239 0x00, 0x65, 0xd8, 0x5d, 239 0x00, 0x65, 0xda, 0x5d,
240 0x01, 0x3f, 0xc8, 0x30, 240 0x01, 0x3f, 0xc8, 0x30,
241 0xbf, 0x64, 0x56, 0x7a, 241 0xbf, 0x64, 0x58, 0x7a,
242 0x80, 0x64, 0x9e, 0x73, 242 0x80, 0x64, 0xa0, 0x73,
243 0xa0, 0x64, 0x00, 0x74, 243 0xa0, 0x64, 0x02, 0x74,
244 0xc0, 0x64, 0xf4, 0x73, 244 0xc0, 0x64, 0xf6, 0x73,
245 0xe0, 0x64, 0x30, 0x74, 245 0xe0, 0x64, 0x32, 0x74,
246 0x01, 0x6a, 0xd8, 0x5e, 246 0x01, 0x6a, 0xda, 0x5e,
247 0x00, 0x65, 0xcc, 0x41, 247 0x00, 0x65, 0xcc, 0x41,
248 0xf7, 0x11, 0x22, 0x08, 248 0xf7, 0x11, 0x22, 0x08,
249 0x01, 0x06, 0xd4, 0x30, 249 0x01, 0x06, 0xd4, 0x30,
250 0xff, 0x6a, 0x24, 0x08, 250 0xff, 0x6a, 0x24, 0x08,
251 0xf7, 0x01, 0x02, 0x08, 251 0xf7, 0x01, 0x02, 0x08,
252 0x09, 0x0c, 0xe6, 0x79, 252 0xc0, 0x6a, 0x78, 0x00,
253 0x09, 0x0c, 0xe8, 0x79,
253 0x08, 0x0c, 0x04, 0x68, 254 0x08, 0x0c, 0x04, 0x68,
254 0xb1, 0x6a, 0xd8, 0x5e, 255 0xb1, 0x6a, 0xda, 0x5e,
255 0xff, 0x6a, 0x26, 0x09, 256 0xff, 0x6a, 0x26, 0x09,
256 0x12, 0x01, 0x02, 0x00, 257 0x12, 0x01, 0x02, 0x00,
257 0x02, 0x6a, 0x08, 0x30, 258 0x02, 0x6a, 0x08, 0x30,
@@ -264,29 +265,29 @@ static uint8_t seqprog[] = {
264 0x00, 0xa5, 0x4a, 0x21, 265 0x00, 0xa5, 0x4a, 0x21,
265 0x00, 0xa6, 0x4c, 0x21, 266 0x00, 0xa6, 0x4c, 0x21,
266 0x00, 0xa7, 0x4e, 0x25, 267 0x00, 0xa7, 0x4e, 0x25,
267 0x08, 0xeb, 0xdc, 0x7e, 268 0x08, 0xeb, 0xde, 0x7e,
268 0x80, 0xeb, 0x06, 0x7a, 269 0x80, 0xeb, 0x08, 0x7a,
269 0xff, 0x6a, 0xd6, 0x09, 270 0xff, 0x6a, 0xd6, 0x09,
270 0x08, 0xeb, 0x0a, 0x6a, 271 0x08, 0xeb, 0x0c, 0x6a,
271 0xff, 0x6a, 0xd4, 0x0c, 272 0xff, 0x6a, 0xd4, 0x0c,
272 0x80, 0xa3, 0xdc, 0x6e, 273 0x80, 0xa3, 0xde, 0x6e,
273 0x88, 0xeb, 0x20, 0x72, 274 0x88, 0xeb, 0x22, 0x72,
274 0x08, 0xeb, 0xdc, 0x6e, 275 0x08, 0xeb, 0xde, 0x6e,
275 0x04, 0xea, 0x24, 0xe2, 276 0x04, 0xea, 0x26, 0xe2,
276 0x08, 0xee, 0xdc, 0x6e, 277 0x08, 0xee, 0xde, 0x6e,
277 0x04, 0x6a, 0xd0, 0x81, 278 0x04, 0x6a, 0xd0, 0x81,
278 0x05, 0xa4, 0xc0, 0x89, 279 0x05, 0xa4, 0xc0, 0x89,
279 0x03, 0xa5, 0xc2, 0x31, 280 0x03, 0xa5, 0xc2, 0x31,
280 0x09, 0x6a, 0xd6, 0x05, 281 0x09, 0x6a, 0xd6, 0x05,
281 0x00, 0x65, 0x08, 0x5a, 282 0x00, 0x65, 0x0a, 0x5a,
282 0x06, 0xa4, 0xd4, 0x89, 283 0x06, 0xa4, 0xd4, 0x89,
283 0x80, 0x94, 0xdc, 0x7e, 284 0x80, 0x94, 0xde, 0x7e,
284 0x07, 0xe9, 0x10, 0x31, 285 0x07, 0xe9, 0x10, 0x31,
285 0x01, 0xe9, 0x46, 0x31, 286 0x01, 0xe9, 0x46, 0x31,
286 0x00, 0xa3, 0xba, 0x5e, 287 0x00, 0xa3, 0xbc, 0x5e,
287 0x00, 0x65, 0xfa, 0x59, 288 0x00, 0x65, 0xfc, 0x59,
288 0x01, 0xa4, 0xca, 0x30, 289 0x01, 0xa4, 0xca, 0x30,
289 0x80, 0xa3, 0x34, 0x7a, 290 0x80, 0xa3, 0x36, 0x7a,
290 0x02, 0x65, 0xca, 0x00, 291 0x02, 0x65, 0xca, 0x00,
291 0x01, 0x65, 0xf8, 0x31, 292 0x01, 0x65, 0xf8, 0x31,
292 0x80, 0x93, 0x26, 0x01, 293 0x80, 0x93, 0x26, 0x01,
@@ -294,162 +295,162 @@ static uint8_t seqprog[] = {
294 0x01, 0x8c, 0xc8, 0x30, 295 0x01, 0x8c, 0xc8, 0x30,
295 0x00, 0x88, 0xc8, 0x18, 296 0x00, 0x88, 0xc8, 0x18,
296 0x02, 0x64, 0xc8, 0x88, 297 0x02, 0x64, 0xc8, 0x88,
297 0xff, 0x64, 0xdc, 0x7e, 298 0xff, 0x64, 0xde, 0x7e,
298 0xff, 0x8d, 0x4a, 0x6a, 299 0xff, 0x8d, 0x4c, 0x6a,
299 0xff, 0x8e, 0x4a, 0x6a, 300 0xff, 0x8e, 0x4c, 0x6a,
300 0x03, 0x8c, 0xd4, 0x98, 301 0x03, 0x8c, 0xd4, 0x98,
301 0x00, 0x65, 0xdc, 0x56, 302 0x00, 0x65, 0xde, 0x56,
302 0x01, 0x64, 0x70, 0x30, 303 0x01, 0x64, 0x70, 0x30,
303 0xff, 0x64, 0xc8, 0x10, 304 0xff, 0x64, 0xc8, 0x10,
304 0x01, 0x64, 0xc8, 0x18, 305 0x01, 0x64, 0xc8, 0x18,
305 0x00, 0x8c, 0x18, 0x19, 306 0x00, 0x8c, 0x18, 0x19,
306 0xff, 0x8d, 0x1a, 0x21, 307 0xff, 0x8d, 0x1a, 0x21,
307 0xff, 0x8e, 0x1c, 0x25, 308 0xff, 0x8e, 0x1c, 0x25,
308 0xc0, 0x3c, 0x5a, 0x7a, 309 0xc0, 0x3c, 0x5c, 0x7a,
309 0x21, 0x6a, 0xd8, 0x5e, 310 0x21, 0x6a, 0xda, 0x5e,
310 0xa8, 0x6a, 0x76, 0x00, 311 0xa8, 0x6a, 0x76, 0x00,
311 0x79, 0x6a, 0x76, 0x00, 312 0x79, 0x6a, 0x76, 0x00,
312 0x40, 0x3f, 0x62, 0x6a, 313 0x40, 0x3f, 0x64, 0x6a,
313 0x04, 0x3b, 0x76, 0x00, 314 0x04, 0x3b, 0x76, 0x00,
314 0x04, 0x6a, 0xd4, 0x81, 315 0x04, 0x6a, 0xd4, 0x81,
315 0x20, 0x3c, 0x6a, 0x7a, 316 0x20, 0x3c, 0x6c, 0x7a,
316 0x51, 0x6a, 0xd8, 0x5e, 317 0x51, 0x6a, 0xda, 0x5e,
317 0x00, 0x65, 0x82, 0x42, 318 0x00, 0x65, 0x84, 0x42,
318 0x20, 0x3c, 0x78, 0x00, 319 0x20, 0x3c, 0x78, 0x00,
319 0x00, 0xb3, 0xba, 0x5e, 320 0x00, 0xb3, 0xbc, 0x5e,
320 0x07, 0xac, 0x10, 0x31, 321 0x07, 0xac, 0x10, 0x31,
321 0x05, 0xb3, 0x46, 0x31, 322 0x05, 0xb3, 0x46, 0x31,
322 0x88, 0x6a, 0xcc, 0x00, 323 0x88, 0x6a, 0xcc, 0x00,
323 0xac, 0x6a, 0xee, 0x5d, 324 0xac, 0x6a, 0xf0, 0x5d,
324 0xa3, 0x6a, 0xcc, 0x00, 325 0xa3, 0x6a, 0xcc, 0x00,
325 0xb3, 0x6a, 0xf2, 0x5d, 326 0xb3, 0x6a, 0xf4, 0x5d,
326 0x00, 0x65, 0x3a, 0x5a, 327 0x00, 0x65, 0x3c, 0x5a,
327 0xfd, 0xa4, 0x48, 0x09, 328 0xfd, 0xa4, 0x48, 0x09,
328 0x03, 0x8c, 0x10, 0x30, 329 0x03, 0x8c, 0x10, 0x30,
329 0x00, 0x65, 0xe6, 0x5d, 330 0x00, 0x65, 0xe8, 0x5d,
330 0x01, 0xa4, 0x94, 0x7a, 331 0x01, 0xa4, 0x96, 0x7a,
331 0x04, 0x3b, 0x76, 0x08, 332 0x04, 0x3b, 0x76, 0x08,
332 0x01, 0x3b, 0x26, 0x31, 333 0x01, 0x3b, 0x26, 0x31,
333 0x80, 0x02, 0x04, 0x00, 334 0x80, 0x02, 0x04, 0x00,
334 0x10, 0x0c, 0x8a, 0x7a, 335 0x10, 0x0c, 0x8c, 0x7a,
335 0x03, 0x9e, 0x8c, 0x6a, 336 0x03, 0x9e, 0x8e, 0x6a,
336 0x7f, 0x02, 0x04, 0x08, 337 0x7f, 0x02, 0x04, 0x08,
337 0x91, 0x6a, 0xd8, 0x5e, 338 0x91, 0x6a, 0xda, 0x5e,
338 0x00, 0x65, 0xcc, 0x41, 339 0x00, 0x65, 0xcc, 0x41,
339 0x01, 0xa4, 0xca, 0x30, 340 0x01, 0xa4, 0xca, 0x30,
340 0x80, 0xa3, 0x9a, 0x7a, 341 0x80, 0xa3, 0x9c, 0x7a,
341 0x02, 0x65, 0xca, 0x00, 342 0x02, 0x65, 0xca, 0x00,
342 0x01, 0x65, 0xf8, 0x31, 343 0x01, 0x65, 0xf8, 0x31,
343 0x01, 0x3b, 0x26, 0x31, 344 0x01, 0x3b, 0x26, 0x31,
344 0x00, 0x65, 0x0e, 0x5a, 345 0x00, 0x65, 0x10, 0x5a,
345 0x01, 0xfc, 0xa8, 0x6a, 346 0x01, 0xfc, 0xaa, 0x6a,
346 0x80, 0x0b, 0x9e, 0x6a, 347 0x80, 0x0b, 0xa0, 0x6a,
347 0x10, 0x0c, 0x9e, 0x7a, 348 0x10, 0x0c, 0xa0, 0x7a,
348 0x20, 0x93, 0x9e, 0x6a, 349 0x20, 0x93, 0xa0, 0x6a,
349 0x02, 0x93, 0x26, 0x01, 350 0x02, 0x93, 0x26, 0x01,
350 0x02, 0xfc, 0xb2, 0x7a, 351 0x02, 0xfc, 0xb4, 0x7a,
351 0x40, 0x0d, 0xc6, 0x6a, 352 0x40, 0x0d, 0xc8, 0x6a,
352 0x01, 0xa4, 0x48, 0x01, 353 0x01, 0xa4, 0x48, 0x01,
353 0x00, 0x65, 0xc6, 0x42, 354 0x00, 0x65, 0xc8, 0x42,
354 0x40, 0x0d, 0xb8, 0x6a, 355 0x40, 0x0d, 0xba, 0x6a,
355 0x00, 0x65, 0x0e, 0x5a, 356 0x00, 0x65, 0x10, 0x5a,
356 0x00, 0x65, 0xaa, 0x42, 357 0x00, 0x65, 0xac, 0x42,
357 0x80, 0xfc, 0xc2, 0x7a, 358 0x80, 0xfc, 0xc4, 0x7a,
358 0x80, 0xa4, 0xc2, 0x6a, 359 0x80, 0xa4, 0xc4, 0x6a,
359 0xff, 0xa5, 0x4a, 0x19, 360 0xff, 0xa5, 0x4a, 0x19,
360 0xff, 0xa6, 0x4c, 0x21, 361 0xff, 0xa6, 0x4c, 0x21,
361 0xff, 0xa7, 0x4e, 0x21, 362 0xff, 0xa7, 0x4e, 0x21,
362 0xf8, 0xfc, 0x48, 0x09, 363 0xf8, 0xfc, 0x48, 0x09,
363 0x7f, 0xa3, 0x46, 0x09, 364 0x7f, 0xa3, 0x46, 0x09,
364 0x04, 0x3b, 0xe2, 0x6a, 365 0x04, 0x3b, 0xe4, 0x6a,
365 0x02, 0x93, 0x26, 0x01, 366 0x02, 0x93, 0x26, 0x01,
366 0x01, 0x94, 0xc8, 0x7a, 367 0x01, 0x94, 0xca, 0x7a,
367 0x01, 0x94, 0xc8, 0x7a, 368 0x01, 0x94, 0xca, 0x7a,
368 0x01, 0x94, 0xc8, 0x7a, 369 0x01, 0x94, 0xca, 0x7a,
369 0x01, 0x94, 0xc8, 0x7a, 370 0x01, 0x94, 0xca, 0x7a,
370 0x01, 0x94, 0xc8, 0x7a, 371 0x01, 0x94, 0xca, 0x7a,
371 0x01, 0xa4, 0xe0, 0x7a, 372 0x01, 0xa4, 0xe2, 0x7a,
372 0x01, 0xfc, 0xd6, 0x7a, 373 0x01, 0xfc, 0xd8, 0x7a,
373 0x01, 0x94, 0xe2, 0x6a, 374 0x01, 0x94, 0xe4, 0x6a,
374 0x01, 0x94, 0xe2, 0x6a, 375 0x01, 0x94, 0xe4, 0x6a,
375 0x01, 0x94, 0xe2, 0x6a, 376 0x01, 0x94, 0xe4, 0x6a,
376 0x00, 0x65, 0x82, 0x42, 377 0x00, 0x65, 0x84, 0x42,
377 0x01, 0x94, 0xe0, 0x7a, 378 0x01, 0x94, 0xe2, 0x7a,
378 0x10, 0x94, 0xe2, 0x6a, 379 0x10, 0x94, 0xe4, 0x6a,
379 0xd7, 0x93, 0x26, 0x09, 380 0xd7, 0x93, 0x26, 0x09,
380 0x28, 0x93, 0xe6, 0x6a, 381 0x28, 0x93, 0xe8, 0x6a,
381 0x01, 0x85, 0x0a, 0x01, 382 0x01, 0x85, 0x0a, 0x01,
382 0x02, 0xfc, 0xee, 0x6a, 383 0x02, 0xfc, 0xf0, 0x6a,
383 0x01, 0x14, 0x46, 0x31, 384 0x01, 0x14, 0x46, 0x31,
384 0xff, 0x6a, 0x10, 0x09, 385 0xff, 0x6a, 0x10, 0x09,
385 0xfe, 0x85, 0x0a, 0x09, 386 0xfe, 0x85, 0x0a, 0x09,
386 0xff, 0x38, 0xfc, 0x6a, 387 0xff, 0x38, 0xfe, 0x6a,
387 0x80, 0xa3, 0xfc, 0x7a, 388 0x80, 0xa3, 0xfe, 0x7a,
388 0x80, 0x0b, 0xfa, 0x7a, 389 0x80, 0x0b, 0xfc, 0x7a,
389 0x04, 0x3b, 0xfc, 0x7a, 390 0x04, 0x3b, 0xfe, 0x7a,
390 0xbf, 0x3b, 0x76, 0x08, 391 0xbf, 0x3b, 0x76, 0x08,
391 0x01, 0x3b, 0x26, 0x31, 392 0x01, 0x3b, 0x26, 0x31,
392 0x00, 0x65, 0x0e, 0x5a, 393 0x00, 0x65, 0x10, 0x5a,
393 0x01, 0x0b, 0x0a, 0x6b, 394 0x01, 0x0b, 0x0c, 0x6b,
394 0x10, 0x0c, 0xfe, 0x7a, 395 0x10, 0x0c, 0x00, 0x7b,
395 0x04, 0x93, 0x08, 0x6b, 396 0x04, 0x93, 0x0a, 0x6b,
396 0x01, 0x94, 0x06, 0x7b, 397 0x01, 0x94, 0x08, 0x7b,
397 0x10, 0x94, 0x08, 0x6b, 398 0x10, 0x94, 0x0a, 0x6b,
398 0xc7, 0x93, 0x26, 0x09, 399 0xc7, 0x93, 0x26, 0x09,
399 0x01, 0x99, 0xd4, 0x30, 400 0x01, 0x99, 0xd4, 0x30,
400 0x38, 0x93, 0x0c, 0x6b, 401 0x38, 0x93, 0x0e, 0x6b,
401 0xff, 0x08, 0x5a, 0x6b, 402 0xff, 0x08, 0x5c, 0x6b,
402 0xff, 0x09, 0x5a, 0x6b, 403 0xff, 0x09, 0x5c, 0x6b,
403 0xff, 0x0a, 0x5a, 0x6b, 404 0xff, 0x0a, 0x5c, 0x6b,
404 0xff, 0x38, 0x28, 0x7b, 405 0xff, 0x38, 0x2a, 0x7b,
405 0x04, 0x14, 0x10, 0x31, 406 0x04, 0x14, 0x10, 0x31,
406 0x01, 0x38, 0x18, 0x31, 407 0x01, 0x38, 0x18, 0x31,
407 0x02, 0x6a, 0x1a, 0x31, 408 0x02, 0x6a, 0x1a, 0x31,
408 0x88, 0x6a, 0xcc, 0x00, 409 0x88, 0x6a, 0xcc, 0x00,
409 0x14, 0x6a, 0xf4, 0x5d, 410 0x14, 0x6a, 0xf6, 0x5d,
410 0x00, 0x38, 0xe0, 0x5d, 411 0x00, 0x38, 0xe2, 0x5d,
411 0xff, 0x6a, 0x70, 0x08, 412 0xff, 0x6a, 0x70, 0x08,
412 0x00, 0x65, 0x54, 0x43, 413 0x00, 0x65, 0x56, 0x43,
413 0x80, 0xa3, 0x2e, 0x7b, 414 0x80, 0xa3, 0x30, 0x7b,
414 0x01, 0xa4, 0x48, 0x01, 415 0x01, 0xa4, 0x48, 0x01,
415 0x00, 0x65, 0x5a, 0x43, 416 0x00, 0x65, 0x5c, 0x43,
416 0x08, 0xeb, 0x34, 0x7b, 417 0x08, 0xeb, 0x36, 0x7b,
417 0x00, 0x65, 0x0e, 0x5a, 418 0x00, 0x65, 0x10, 0x5a,
418 0x08, 0xeb, 0x30, 0x6b, 419 0x08, 0xeb, 0x32, 0x6b,
419 0x07, 0xe9, 0x10, 0x31, 420 0x07, 0xe9, 0x10, 0x31,
420 0x01, 0xe9, 0xca, 0x30, 421 0x01, 0xe9, 0xca, 0x30,
421 0x01, 0x65, 0x46, 0x31, 422 0x01, 0x65, 0x46, 0x31,
422 0x00, 0x6a, 0xba, 0x5e, 423 0x00, 0x6a, 0xbc, 0x5e,
423 0x88, 0x6a, 0xcc, 0x00, 424 0x88, 0x6a, 0xcc, 0x00,
424 0xa4, 0x6a, 0xf4, 0x5d, 425 0xa4, 0x6a, 0xf6, 0x5d,
425 0x08, 0x6a, 0xe0, 0x5d, 426 0x08, 0x6a, 0xe2, 0x5d,
426 0x0d, 0x93, 0x26, 0x01, 427 0x0d, 0x93, 0x26, 0x01,
427 0x00, 0x65, 0xa8, 0x5e, 428 0x00, 0x65, 0xaa, 0x5e,
428 0x88, 0x6a, 0xcc, 0x00, 429 0x88, 0x6a, 0xcc, 0x00,
429 0x00, 0x65, 0x8a, 0x5e, 430 0x00, 0x65, 0x8c, 0x5e,
430 0x01, 0x99, 0x46, 0x31, 431 0x01, 0x99, 0x46, 0x31,
431 0x00, 0xa3, 0xba, 0x5e, 432 0x00, 0xa3, 0xbc, 0x5e,
432 0x01, 0x88, 0x10, 0x31, 433 0x01, 0x88, 0x10, 0x31,
433 0x00, 0x65, 0x3a, 0x5a, 434 0x00, 0x65, 0x3c, 0x5a,
434 0x00, 0x65, 0xfa, 0x59, 435 0x00, 0x65, 0xfc, 0x59,
435 0x03, 0x8c, 0x10, 0x30, 436 0x03, 0x8c, 0x10, 0x30,
436 0x00, 0x65, 0xe6, 0x5d, 437 0x00, 0x65, 0xe8, 0x5d,
437 0x80, 0x0b, 0x82, 0x6a, 438 0x80, 0x0b, 0x84, 0x6a,
438 0x80, 0x0b, 0x62, 0x6b, 439 0x80, 0x0b, 0x64, 0x6b,
439 0x01, 0x0c, 0x5c, 0x7b, 440 0x01, 0x0c, 0x5e, 0x7b,
440 0x10, 0x0c, 0x82, 0x7a, 441 0x10, 0x0c, 0x84, 0x7a,
441 0x03, 0x9e, 0x82, 0x6a, 442 0x03, 0x9e, 0x84, 0x6a,
442 0x00, 0x65, 0x04, 0x5a, 443 0x00, 0x65, 0x06, 0x5a,
443 0x00, 0x6a, 0xba, 0x5e, 444 0x00, 0x6a, 0xbc, 0x5e,
444 0x01, 0xa4, 0x82, 0x6b, 445 0x01, 0xa4, 0x84, 0x6b,
445 0xff, 0x38, 0x78, 0x7b, 446 0xff, 0x38, 0x7a, 0x7b,
446 0x01, 0x38, 0xc8, 0x30, 447 0x01, 0x38, 0xc8, 0x30,
447 0x00, 0x08, 0x40, 0x19, 448 0x00, 0x08, 0x40, 0x19,
448 0xff, 0x6a, 0xc8, 0x08, 449 0xff, 0x6a, 0xc8, 0x08,
449 0x00, 0x09, 0x42, 0x21, 450 0x00, 0x09, 0x42, 0x21,
450 0x00, 0x0a, 0x44, 0x21, 451 0x00, 0x0a, 0x44, 0x21,
451 0xff, 0x6a, 0x70, 0x08, 452 0xff, 0x6a, 0x70, 0x08,
452 0x00, 0x65, 0x7a, 0x43, 453 0x00, 0x65, 0x7c, 0x43,
453 0x03, 0x08, 0x40, 0x31, 454 0x03, 0x08, 0x40, 0x31,
454 0x03, 0x08, 0x40, 0x31, 455 0x03, 0x08, 0x40, 0x31,
455 0x01, 0x08, 0x40, 0x31, 456 0x01, 0x08, 0x40, 0x31,
@@ -461,16 +462,16 @@ static uint8_t seqprog[] = {
461 0x04, 0x3c, 0xcc, 0x79, 462 0x04, 0x3c, 0xcc, 0x79,
462 0xfb, 0x3c, 0x78, 0x08, 463 0xfb, 0x3c, 0x78, 0x08,
463 0x04, 0x93, 0x20, 0x79, 464 0x04, 0x93, 0x20, 0x79,
464 0x01, 0x0c, 0x8e, 0x6b, 465 0x01, 0x0c, 0x90, 0x6b,
465 0x80, 0xba, 0x20, 0x79, 466 0x80, 0xba, 0x20, 0x79,
466 0x80, 0x04, 0x20, 0x79, 467 0x80, 0x04, 0x20, 0x79,
467 0xe4, 0x6a, 0x6e, 0x5d, 468 0xe4, 0x6a, 0x70, 0x5d,
468 0x23, 0x6a, 0x84, 0x5d, 469 0x23, 0x6a, 0x86, 0x5d,
469 0x01, 0x6a, 0x84, 0x5d, 470 0x01, 0x6a, 0x86, 0x5d,
470 0x00, 0x65, 0x20, 0x41, 471 0x00, 0x65, 0x20, 0x41,
471 0x00, 0x65, 0xcc, 0x41, 472 0x00, 0x65, 0xcc, 0x41,
472 0x80, 0x3c, 0xa2, 0x7b, 473 0x80, 0x3c, 0xa4, 0x7b,
473 0x21, 0x6a, 0xd8, 0x5e, 474 0x21, 0x6a, 0xda, 0x5e,
474 0x01, 0xbc, 0x18, 0x31, 475 0x01, 0xbc, 0x18, 0x31,
475 0x02, 0x6a, 0x1a, 0x31, 476 0x02, 0x6a, 0x1a, 0x31,
476 0x02, 0x6a, 0xf8, 0x01, 477 0x02, 0x6a, 0xf8, 0x01,
@@ -480,16 +481,16 @@ static uint8_t seqprog[] = {
480 0xff, 0x6a, 0x12, 0x08, 481 0xff, 0x6a, 0x12, 0x08,
481 0xff, 0x6a, 0x14, 0x08, 482 0xff, 0x6a, 0x14, 0x08,
482 0xf3, 0xbc, 0xd4, 0x18, 483 0xf3, 0xbc, 0xd4, 0x18,
483 0xa0, 0x6a, 0xc8, 0x53, 484 0xa0, 0x6a, 0xca, 0x53,
484 0x04, 0xa0, 0x10, 0x31, 485 0x04, 0xa0, 0x10, 0x31,
485 0xac, 0x6a, 0x26, 0x01, 486 0xac, 0x6a, 0x26, 0x01,
486 0x04, 0xa0, 0x10, 0x31, 487 0x04, 0xa0, 0x10, 0x31,
487 0x03, 0x08, 0x18, 0x31, 488 0x03, 0x08, 0x18, 0x31,
488 0x88, 0x6a, 0xcc, 0x00, 489 0x88, 0x6a, 0xcc, 0x00,
489 0xa0, 0x6a, 0xf4, 0x5d, 490 0xa0, 0x6a, 0xf6, 0x5d,
490 0x00, 0xbc, 0xe0, 0x5d, 491 0x00, 0xbc, 0xe2, 0x5d,
491 0x3d, 0x6a, 0x26, 0x01, 492 0x3d, 0x6a, 0x26, 0x01,
492 0x00, 0x65, 0xe0, 0x43, 493 0x00, 0x65, 0xe2, 0x43,
493 0xff, 0x6a, 0x10, 0x09, 494 0xff, 0x6a, 0x10, 0x09,
494 0xa4, 0x6a, 0x26, 0x01, 495 0xa4, 0x6a, 0x26, 0x01,
495 0x0c, 0xa0, 0x32, 0x31, 496 0x0c, 0xa0, 0x32, 0x31,
@@ -499,128 +500,128 @@ static uint8_t seqprog[] = {
499 0x36, 0x6a, 0x26, 0x01, 500 0x36, 0x6a, 0x26, 0x01,
500 0x02, 0x93, 0x26, 0x01, 501 0x02, 0x93, 0x26, 0x01,
501 0x35, 0x6a, 0x26, 0x01, 502 0x35, 0x6a, 0x26, 0x01,
502 0x00, 0x65, 0x9c, 0x5e, 503 0x00, 0x65, 0x9e, 0x5e,
503 0x00, 0x65, 0x9c, 0x5e, 504 0x00, 0x65, 0x9e, 0x5e,
504 0x02, 0x93, 0x26, 0x01, 505 0x02, 0x93, 0x26, 0x01,
505 0xbf, 0x3c, 0x78, 0x08, 506 0xbf, 0x3c, 0x78, 0x08,
506 0x04, 0x0b, 0xe6, 0x6b, 507 0x04, 0x0b, 0xe8, 0x6b,
507 0x10, 0x0c, 0xe2, 0x7b, 508 0x10, 0x0c, 0xe4, 0x7b,
508 0x01, 0x03, 0xe6, 0x6b, 509 0x01, 0x03, 0xe8, 0x6b,
509 0x20, 0x93, 0xe8, 0x6b, 510 0x20, 0x93, 0xea, 0x6b,
510 0x04, 0x0b, 0xee, 0x6b, 511 0x04, 0x0b, 0xf0, 0x6b,
511 0x40, 0x3c, 0x78, 0x00, 512 0x40, 0x3c, 0x78, 0x00,
512 0xc7, 0x93, 0x26, 0x09, 513 0xc7, 0x93, 0x26, 0x09,
513 0x38, 0x93, 0xf0, 0x6b, 514 0x38, 0x93, 0xf2, 0x6b,
514 0x00, 0x65, 0xcc, 0x41, 515 0x00, 0x65, 0xcc, 0x41,
515 0x80, 0x3c, 0x56, 0x6c, 516 0x80, 0x3c, 0x58, 0x6c,
516 0x01, 0x06, 0x50, 0x31, 517 0x01, 0x06, 0x50, 0x31,
517 0x80, 0xb8, 0x70, 0x01, 518 0x80, 0xb8, 0x70, 0x01,
518 0x00, 0x65, 0xcc, 0x41, 519 0x00, 0x65, 0xcc, 0x41,
519 0x10, 0x3f, 0x06, 0x00, 520 0x10, 0x3f, 0x06, 0x00,
520 0x10, 0x6a, 0x06, 0x00, 521 0x10, 0x6a, 0x06, 0x00,
521 0x01, 0x3a, 0xca, 0x30, 522 0x01, 0x3a, 0xca, 0x30,
522 0x80, 0x65, 0x1c, 0x64, 523 0x80, 0x65, 0x1e, 0x64,
523 0x10, 0xb8, 0x40, 0x6c, 524 0x10, 0xb8, 0x42, 0x6c,
524 0xc0, 0x3e, 0xca, 0x00, 525 0xc0, 0x3e, 0xca, 0x00,
525 0x40, 0xb8, 0x0c, 0x6c, 526 0x40, 0xb8, 0x0e, 0x6c,
526 0xbf, 0x65, 0xca, 0x08, 527 0xbf, 0x65, 0xca, 0x08,
527 0x20, 0xb8, 0x20, 0x7c, 528 0x20, 0xb8, 0x22, 0x7c,
528 0x01, 0x65, 0x0c, 0x30, 529 0x01, 0x65, 0x0c, 0x30,
529 0x00, 0x65, 0xd8, 0x5d, 530 0x00, 0x65, 0xda, 0x5d,
530 0xa0, 0x3f, 0x28, 0x64, 531 0xa0, 0x3f, 0x2a, 0x64,
531 0x23, 0xb8, 0x0c, 0x08, 532 0x23, 0xb8, 0x0c, 0x08,
532 0x00, 0x65, 0xd8, 0x5d, 533 0x00, 0x65, 0xda, 0x5d,
533 0xa0, 0x3f, 0x28, 0x64, 534 0xa0, 0x3f, 0x2a, 0x64,
534 0x00, 0xbb, 0x20, 0x44, 535 0x00, 0xbb, 0x22, 0x44,
535 0xff, 0x65, 0x20, 0x64, 536 0xff, 0x65, 0x22, 0x64,
536 0x00, 0x65, 0x40, 0x44, 537 0x00, 0x65, 0x42, 0x44,
537 0x40, 0x6a, 0x18, 0x00, 538 0x40, 0x6a, 0x18, 0x00,
538 0x01, 0x65, 0x0c, 0x30, 539 0x01, 0x65, 0x0c, 0x30,
539 0x00, 0x65, 0xd8, 0x5d, 540 0x00, 0x65, 0xda, 0x5d,
540 0xa0, 0x3f, 0xfc, 0x73, 541 0xa0, 0x3f, 0xfe, 0x73,
541 0x40, 0x6a, 0x18, 0x00, 542 0x40, 0x6a, 0x18, 0x00,
542 0x01, 0x3a, 0xa6, 0x30, 543 0x01, 0x3a, 0xa6, 0x30,
543 0x08, 0x6a, 0x74, 0x00, 544 0x08, 0x6a, 0x74, 0x00,
544 0x00, 0x65, 0xcc, 0x41, 545 0x00, 0x65, 0xcc, 0x41,
545 0x64, 0x6a, 0x68, 0x5d, 546 0x64, 0x6a, 0x6a, 0x5d,
546 0x80, 0x64, 0xd8, 0x6c, 547 0x80, 0x64, 0xda, 0x6c,
547 0x04, 0x64, 0x9a, 0x74, 548 0x04, 0x64, 0x9c, 0x74,
548 0x02, 0x64, 0xaa, 0x74, 549 0x02, 0x64, 0xac, 0x74,
549 0x00, 0x6a, 0x60, 0x74, 550 0x00, 0x6a, 0x62, 0x74,
550 0x03, 0x64, 0xc8, 0x74, 551 0x03, 0x64, 0xca, 0x74,
551 0x23, 0x64, 0x48, 0x74, 552 0x23, 0x64, 0x4a, 0x74,
552 0x08, 0x64, 0x5c, 0x74, 553 0x08, 0x64, 0x5e, 0x74,
553 0x61, 0x6a, 0xd8, 0x5e, 554 0x61, 0x6a, 0xda, 0x5e,
554 0x00, 0x65, 0xd8, 0x5d, 555 0x00, 0x65, 0xda, 0x5d,
555 0x08, 0x51, 0xce, 0x71, 556 0x08, 0x51, 0xce, 0x71,
556 0x00, 0x65, 0x40, 0x44, 557 0x00, 0x65, 0x42, 0x44,
557 0x80, 0x04, 0x5a, 0x7c, 558 0x80, 0x04, 0x5c, 0x7c,
558 0x51, 0x6a, 0x5e, 0x5d, 559 0x51, 0x6a, 0x60, 0x5d,
559 0x01, 0x51, 0x5a, 0x64, 560 0x01, 0x51, 0x5c, 0x64,
560 0x01, 0xa4, 0x52, 0x7c, 561 0x01, 0xa4, 0x54, 0x7c,
561 0x80, 0xba, 0x5c, 0x6c, 562 0x80, 0xba, 0x5e, 0x6c,
562 0x41, 0x6a, 0xd8, 0x5e, 563 0x41, 0x6a, 0xda, 0x5e,
563 0x00, 0x65, 0x5c, 0x44, 564 0x00, 0x65, 0x5e, 0x44,
564 0x21, 0x6a, 0xd8, 0x5e, 565 0x21, 0x6a, 0xda, 0x5e,
565 0x00, 0x65, 0x5c, 0x44, 566 0x00, 0x65, 0x5e, 0x44,
566 0x07, 0x6a, 0x54, 0x5d, 567 0x07, 0x6a, 0x56, 0x5d,
567 0x01, 0x06, 0xd4, 0x30, 568 0x01, 0x06, 0xd4, 0x30,
568 0x00, 0x65, 0xcc, 0x41, 569 0x00, 0x65, 0xcc, 0x41,
569 0x80, 0xb8, 0x56, 0x7c, 570 0x80, 0xb8, 0x58, 0x7c,
570 0xc0, 0x3c, 0x6a, 0x7c, 571 0xc0, 0x3c, 0x6c, 0x7c,
571 0x80, 0x3c, 0x56, 0x6c, 572 0x80, 0x3c, 0x58, 0x6c,
572 0xff, 0xa8, 0x6a, 0x6c, 573 0xff, 0xa8, 0x6c, 0x6c,
573 0x40, 0x3c, 0x56, 0x6c, 574 0x40, 0x3c, 0x58, 0x6c,
574 0x10, 0xb8, 0x6e, 0x7c, 575 0x10, 0xb8, 0x70, 0x7c,
575 0xa1, 0x6a, 0xd8, 0x5e, 576 0xa1, 0x6a, 0xda, 0x5e,
576 0x01, 0xb4, 0x74, 0x6c, 577 0x01, 0xb4, 0x76, 0x6c,
577 0x02, 0xb4, 0x76, 0x6c, 578 0x02, 0xb4, 0x78, 0x6c,
578 0x01, 0xa4, 0x76, 0x7c, 579 0x01, 0xa4, 0x78, 0x7c,
579 0xff, 0xa8, 0x86, 0x7c, 580 0xff, 0xa8, 0x88, 0x7c,
580 0x04, 0xb4, 0x68, 0x01, 581 0x04, 0xb4, 0x68, 0x01,
581 0x01, 0x6a, 0x76, 0x00, 582 0x01, 0x6a, 0x76, 0x00,
582 0x00, 0xbb, 0x12, 0x5e, 583 0x00, 0xbb, 0x14, 0x5e,
583 0xff, 0xa8, 0x86, 0x7c, 584 0xff, 0xa8, 0x88, 0x7c,
584 0x71, 0x6a, 0xd8, 0x5e, 585 0x71, 0x6a, 0xda, 0x5e,
585 0x40, 0x51, 0x86, 0x64, 586 0x40, 0x51, 0x88, 0x64,
586 0x00, 0x65, 0xb2, 0x5e, 587 0x00, 0x65, 0xb4, 0x5e,
587 0x00, 0x65, 0xde, 0x41, 588 0x00, 0x65, 0xde, 0x41,
588 0x00, 0xbb, 0x8a, 0x5c, 589 0x00, 0xbb, 0x8c, 0x5c,
589 0x00, 0x65, 0xde, 0x41, 590 0x00, 0x65, 0xde, 0x41,
590 0x00, 0x65, 0xb2, 0x5e, 591 0x00, 0x65, 0xb4, 0x5e,
591 0x01, 0x65, 0xa2, 0x30, 592 0x01, 0x65, 0xa2, 0x30,
592 0x01, 0xf8, 0xc8, 0x30, 593 0x01, 0xf8, 0xc8, 0x30,
593 0x01, 0x4e, 0xc8, 0x30, 594 0x01, 0x4e, 0xc8, 0x30,
594 0x00, 0x6a, 0xb6, 0xdd, 595 0x00, 0x6a, 0xb8, 0xdd,
595 0x00, 0x51, 0xc8, 0x5d, 596 0x00, 0x51, 0xca, 0x5d,
596 0x01, 0x4e, 0x9c, 0x18, 597 0x01, 0x4e, 0x9c, 0x18,
597 0x02, 0x6a, 0x22, 0x05, 598 0x02, 0x6a, 0x22, 0x05,
598 0xc0, 0x3c, 0x56, 0x6c, 599 0xc0, 0x3c, 0x58, 0x6c,
599 0x04, 0xb8, 0x70, 0x01, 600 0x04, 0xb8, 0x70, 0x01,
600 0x00, 0x65, 0xd4, 0x5e, 601 0x00, 0x65, 0xd6, 0x5e,
601 0x20, 0xb8, 0xde, 0x69, 602 0x20, 0xb8, 0xde, 0x69,
602 0x01, 0xbb, 0xa2, 0x30, 603 0x01, 0xbb, 0xa2, 0x30,
603 0x3f, 0xba, 0x7c, 0x08, 604 0x3f, 0xba, 0x7c, 0x08,
604 0x00, 0xb9, 0xce, 0x5c, 605 0x00, 0xb9, 0xd0, 0x5c,
605 0x00, 0x65, 0xde, 0x41, 606 0x00, 0x65, 0xde, 0x41,
606 0x01, 0x06, 0xd4, 0x30, 607 0x01, 0x06, 0xd4, 0x30,
607 0x20, 0x3c, 0xcc, 0x79, 608 0x20, 0x3c, 0xcc, 0x79,
608 0x20, 0x3c, 0x5c, 0x7c, 609 0x20, 0x3c, 0x5e, 0x7c,
609 0x01, 0xa4, 0xb8, 0x7c, 610 0x01, 0xa4, 0xba, 0x7c,
610 0x01, 0xb4, 0x68, 0x01, 611 0x01, 0xb4, 0x68, 0x01,
611 0x00, 0x65, 0xcc, 0x41, 612 0x00, 0x65, 0xcc, 0x41,
612 0x00, 0x65, 0x5c, 0x44, 613 0x00, 0x65, 0x5e, 0x44,
613 0x04, 0x14, 0x58, 0x31, 614 0x04, 0x14, 0x58, 0x31,
614 0x01, 0x06, 0xd4, 0x30, 615 0x01, 0x06, 0xd4, 0x30,
615 0x08, 0xa0, 0x60, 0x31, 616 0x08, 0xa0, 0x60, 0x31,
616 0xac, 0x6a, 0xcc, 0x00, 617 0xac, 0x6a, 0xcc, 0x00,
617 0x14, 0x6a, 0xf4, 0x5d, 618 0x14, 0x6a, 0xf6, 0x5d,
618 0x01, 0x06, 0xd4, 0x30, 619 0x01, 0x06, 0xd4, 0x30,
619 0xa0, 0x6a, 0xec, 0x5d, 620 0xa0, 0x6a, 0xee, 0x5d,
620 0x00, 0x65, 0xcc, 0x41, 621 0x00, 0x65, 0xcc, 0x41,
621 0xdf, 0x3c, 0x78, 0x08, 622 0xdf, 0x3c, 0x78, 0x08,
622 0x12, 0x01, 0x02, 0x00, 623 0x12, 0x01, 0x02, 0x00,
623 0x00, 0x65, 0x5c, 0x44, 624 0x00, 0x65, 0x5e, 0x44,
624 0x4c, 0x65, 0xcc, 0x28, 625 0x4c, 0x65, 0xcc, 0x28,
625 0x01, 0x3e, 0x20, 0x31, 626 0x01, 0x3e, 0x20, 0x31,
626 0xd0, 0x66, 0xcc, 0x18, 627 0xd0, 0x66, 0xcc, 0x18,
@@ -631,102 +632,102 @@ static uint8_t seqprog[] = {
631 0xd0, 0x65, 0xca, 0x18, 632 0xd0, 0x65, 0xca, 0x18,
632 0x01, 0x3e, 0x20, 0x31, 633 0x01, 0x3e, 0x20, 0x31,
633 0x30, 0x65, 0xd4, 0x18, 634 0x30, 0x65, 0xd4, 0x18,
634 0x00, 0x65, 0xe6, 0x4c, 635 0x00, 0x65, 0xe8, 0x4c,
635 0xe1, 0x6a, 0x22, 0x01, 636 0xe1, 0x6a, 0x22, 0x01,
636 0xff, 0x6a, 0xd4, 0x08, 637 0xff, 0x6a, 0xd4, 0x08,
637 0x20, 0x65, 0xd4, 0x18, 638 0x20, 0x65, 0xd4, 0x18,
638 0x00, 0x65, 0xee, 0x54, 639 0x00, 0x65, 0xf0, 0x54,
639 0xe1, 0x6a, 0x22, 0x01, 640 0xe1, 0x6a, 0x22, 0x01,
640 0xff, 0x6a, 0xd4, 0x08, 641 0xff, 0x6a, 0xd4, 0x08,
641 0x20, 0x65, 0xca, 0x18, 642 0x20, 0x65, 0xca, 0x18,
642 0xe0, 0x65, 0xd4, 0x18, 643 0xe0, 0x65, 0xd4, 0x18,
643 0x00, 0x65, 0xf8, 0x4c, 644 0x00, 0x65, 0xfa, 0x4c,
644 0xe1, 0x6a, 0x22, 0x01, 645 0xe1, 0x6a, 0x22, 0x01,
645 0xff, 0x6a, 0xd4, 0x08, 646 0xff, 0x6a, 0xd4, 0x08,
646 0xd0, 0x65, 0xd4, 0x18, 647 0xd0, 0x65, 0xd4, 0x18,
647 0x00, 0x65, 0x00, 0x55, 648 0x00, 0x65, 0x02, 0x55,
648 0xe1, 0x6a, 0x22, 0x01, 649 0xe1, 0x6a, 0x22, 0x01,
649 0xff, 0x6a, 0xd4, 0x08, 650 0xff, 0x6a, 0xd4, 0x08,
650 0x01, 0x6c, 0xa2, 0x30, 651 0x01, 0x6c, 0xa2, 0x30,
651 0xff, 0x51, 0x12, 0x75, 652 0xff, 0x51, 0x14, 0x75,
652 0x00, 0x51, 0x8e, 0x5d, 653 0x00, 0x51, 0x90, 0x5d,
653 0x01, 0x51, 0x20, 0x31, 654 0x01, 0x51, 0x20, 0x31,
654 0x00, 0x65, 0x34, 0x45, 655 0x00, 0x65, 0x36, 0x45,
655 0x3f, 0xba, 0xc8, 0x08, 656 0x3f, 0xba, 0xc8, 0x08,
656 0x00, 0x3e, 0x34, 0x75, 657 0x00, 0x3e, 0x36, 0x75,
657 0x00, 0x65, 0xb0, 0x5e, 658 0x00, 0x65, 0xb2, 0x5e,
658 0x80, 0x3c, 0x78, 0x00, 659 0x80, 0x3c, 0x78, 0x00,
659 0x01, 0x06, 0xd4, 0x30, 660 0x01, 0x06, 0xd4, 0x30,
660 0x00, 0x65, 0xd8, 0x5d, 661 0x00, 0x65, 0xda, 0x5d,
661 0x01, 0x3c, 0x78, 0x00, 662 0x01, 0x3c, 0x78, 0x00,
662 0xe0, 0x3f, 0x50, 0x65, 663 0xe0, 0x3f, 0x52, 0x65,
663 0x02, 0x3c, 0x78, 0x00, 664 0x02, 0x3c, 0x78, 0x00,
664 0x20, 0x12, 0x50, 0x65, 665 0x20, 0x12, 0x52, 0x65,
665 0x51, 0x6a, 0x5e, 0x5d, 666 0x51, 0x6a, 0x60, 0x5d,
666 0x00, 0x51, 0x8e, 0x5d, 667 0x00, 0x51, 0x90, 0x5d,
667 0x51, 0x6a, 0x5e, 0x5d, 668 0x51, 0x6a, 0x60, 0x5d,
668 0x01, 0x51, 0x20, 0x31, 669 0x01, 0x51, 0x20, 0x31,
669 0x04, 0x3c, 0x78, 0x00, 670 0x04, 0x3c, 0x78, 0x00,
670 0x01, 0xb9, 0xc8, 0x30, 671 0x01, 0xb9, 0xc8, 0x30,
671 0x00, 0x3d, 0x4e, 0x65, 672 0x00, 0x3d, 0x50, 0x65,
672 0x08, 0x3c, 0x78, 0x00, 673 0x08, 0x3c, 0x78, 0x00,
673 0x3f, 0xba, 0xc8, 0x08, 674 0x3f, 0xba, 0xc8, 0x08,
674 0x00, 0x3e, 0x4e, 0x65, 675 0x00, 0x3e, 0x50, 0x65,
675 0x10, 0x3c, 0x78, 0x00, 676 0x10, 0x3c, 0x78, 0x00,
676 0x04, 0xb8, 0x4e, 0x7d, 677 0x04, 0xb8, 0x50, 0x7d,
677 0xfb, 0xb8, 0x70, 0x09, 678 0xfb, 0xb8, 0x70, 0x09,
678 0x20, 0xb8, 0x44, 0x6d, 679 0x20, 0xb8, 0x46, 0x6d,
679 0x01, 0x90, 0xc8, 0x30, 680 0x01, 0x90, 0xc8, 0x30,
680 0xff, 0x6a, 0xa2, 0x00, 681 0xff, 0x6a, 0xa2, 0x00,
681 0x00, 0x3d, 0xce, 0x5c, 682 0x00, 0x3d, 0xd0, 0x5c,
682 0x01, 0x64, 0x20, 0x31, 683 0x01, 0x64, 0x20, 0x31,
683 0xff, 0x6a, 0x78, 0x08, 684 0xff, 0x6a, 0x78, 0x08,
684 0x00, 0x65, 0xea, 0x58, 685 0x00, 0x65, 0xea, 0x58,
685 0x10, 0xb8, 0x5c, 0x7c, 686 0x10, 0xb8, 0x5e, 0x7c,
686 0xff, 0x6a, 0x54, 0x5d, 687 0xff, 0x6a, 0x56, 0x5d,
687 0x00, 0x65, 0x5c, 0x44, 688 0x00, 0x65, 0x5e, 0x44,
688 0x00, 0x65, 0xb0, 0x5e, 689 0x00, 0x65, 0xb2, 0x5e,
689 0x31, 0x6a, 0xd8, 0x5e, 690 0x31, 0x6a, 0xda, 0x5e,
690 0x00, 0x65, 0x5c, 0x44, 691 0x00, 0x65, 0x5e, 0x44,
691 0x10, 0x3f, 0x06, 0x00, 692 0x10, 0x3f, 0x06, 0x00,
692 0x10, 0x6a, 0x06, 0x00, 693 0x10, 0x6a, 0x06, 0x00,
693 0x01, 0x65, 0x74, 0x34, 694 0x01, 0x65, 0x74, 0x34,
694 0x81, 0x6a, 0xd8, 0x5e, 695 0x81, 0x6a, 0xda, 0x5e,
695 0x00, 0x65, 0x60, 0x45, 696 0x00, 0x65, 0x62, 0x45,
696 0x01, 0x06, 0xd4, 0x30, 697 0x01, 0x06, 0xd4, 0x30,
697 0x01, 0x0c, 0x60, 0x7d, 698 0x01, 0x0c, 0x62, 0x7d,
698 0x04, 0x0c, 0x5a, 0x6d, 699 0x04, 0x0c, 0x5c, 0x6d,
699 0xe0, 0x03, 0x7e, 0x08, 700 0xe0, 0x03, 0x7e, 0x08,
700 0xe0, 0x3f, 0xcc, 0x61, 701 0xe0, 0x3f, 0xcc, 0x61,
701 0x01, 0x65, 0xcc, 0x30, 702 0x01, 0x65, 0xcc, 0x30,
702 0x01, 0x12, 0xda, 0x34, 703 0x01, 0x12, 0xda, 0x34,
703 0x01, 0x06, 0xd4, 0x34, 704 0x01, 0x06, 0xd4, 0x34,
704 0x01, 0x03, 0x6e, 0x6d, 705 0x01, 0x03, 0x70, 0x6d,
705 0x40, 0x03, 0xcc, 0x08, 706 0x40, 0x03, 0xcc, 0x08,
706 0x01, 0x65, 0x06, 0x30, 707 0x01, 0x65, 0x06, 0x30,
707 0x40, 0x65, 0xc8, 0x08, 708 0x40, 0x65, 0xc8, 0x08,
708 0x00, 0x66, 0x7c, 0x75, 709 0x00, 0x66, 0x7e, 0x75,
709 0x40, 0x65, 0x7c, 0x7d, 710 0x40, 0x65, 0x7e, 0x7d,
710 0x00, 0x65, 0x7c, 0x5d, 711 0x00, 0x65, 0x7e, 0x5d,
711 0xff, 0x6a, 0xd4, 0x08, 712 0xff, 0x6a, 0xd4, 0x08,
712 0xff, 0x6a, 0xd4, 0x08, 713 0xff, 0x6a, 0xd4, 0x08,
713 0xff, 0x6a, 0xd4, 0x08, 714 0xff, 0x6a, 0xd4, 0x08,
714 0xff, 0x6a, 0xd4, 0x0c, 715 0xff, 0x6a, 0xd4, 0x0c,
715 0x08, 0x01, 0x02, 0x00, 716 0x08, 0x01, 0x02, 0x00,
716 0x02, 0x0b, 0x86, 0x7d, 717 0x02, 0x0b, 0x88, 0x7d,
717 0x01, 0x65, 0x0c, 0x30, 718 0x01, 0x65, 0x0c, 0x30,
718 0x02, 0x0b, 0x8a, 0x7d, 719 0x02, 0x0b, 0x8c, 0x7d,
719 0xf7, 0x01, 0x02, 0x0c, 720 0xf7, 0x01, 0x02, 0x0c,
720 0x01, 0x65, 0xc8, 0x30, 721 0x01, 0x65, 0xc8, 0x30,
721 0xff, 0x41, 0xae, 0x75, 722 0xff, 0x41, 0xb0, 0x75,
722 0x01, 0x41, 0x20, 0x31, 723 0x01, 0x41, 0x20, 0x31,
723 0xff, 0x6a, 0xa4, 0x00, 724 0xff, 0x6a, 0xa4, 0x00,
724 0x00, 0x65, 0x9e, 0x45, 725 0x00, 0x65, 0xa0, 0x45,
725 0xff, 0xbf, 0xae, 0x75, 726 0xff, 0xbf, 0xb0, 0x75,
726 0x01, 0x90, 0xa4, 0x30, 727 0x01, 0x90, 0xa4, 0x30,
727 0x01, 0xbf, 0x20, 0x31, 728 0x01, 0xbf, 0x20, 0x31,
728 0x00, 0xbb, 0x98, 0x65, 729 0x00, 0xbb, 0x9a, 0x65,
729 0xff, 0x52, 0xac, 0x75, 730 0xff, 0x52, 0xae, 0x75,
730 0x01, 0xbf, 0xcc, 0x30, 731 0x01, 0xbf, 0xcc, 0x30,
731 0x01, 0x90, 0xca, 0x30, 732 0x01, 0x90, 0xca, 0x30,
732 0x01, 0x52, 0x20, 0x31, 733 0x01, 0x52, 0x20, 0x31,
@@ -734,28 +735,28 @@ static uint8_t seqprog[] = {
734 0x01, 0x65, 0x20, 0x35, 735 0x01, 0x65, 0x20, 0x35,
735 0x01, 0xbf, 0x82, 0x34, 736 0x01, 0xbf, 0x82, 0x34,
736 0x01, 0x64, 0xa2, 0x30, 737 0x01, 0x64, 0xa2, 0x30,
737 0x00, 0x6a, 0xc0, 0x5e, 738 0x00, 0x6a, 0xc2, 0x5e,
738 0x0d, 0x6a, 0x76, 0x00, 739 0x0d, 0x6a, 0x76, 0x00,
739 0x00, 0x51, 0x12, 0x46, 740 0x00, 0x51, 0x14, 0x46,
740 0x01, 0x65, 0xa4, 0x30, 741 0x01, 0x65, 0xa4, 0x30,
741 0xe0, 0x6a, 0xcc, 0x00, 742 0xe0, 0x6a, 0xcc, 0x00,
742 0x48, 0x6a, 0x06, 0x5e, 743 0x48, 0x6a, 0x08, 0x5e,
743 0x01, 0x6a, 0xd0, 0x01, 744 0x01, 0x6a, 0xd0, 0x01,
744 0x01, 0x6a, 0xdc, 0x05, 745 0x01, 0x6a, 0xdc, 0x05,
745 0x88, 0x6a, 0xcc, 0x00, 746 0x88, 0x6a, 0xcc, 0x00,
746 0x48, 0x6a, 0x06, 0x5e, 747 0x48, 0x6a, 0x08, 0x5e,
747 0x01, 0x6a, 0xe0, 0x5d, 748 0x01, 0x6a, 0xe2, 0x5d,
748 0x01, 0x6a, 0x26, 0x05, 749 0x01, 0x6a, 0x26, 0x05,
749 0x01, 0x65, 0xd8, 0x31, 750 0x01, 0x65, 0xd8, 0x31,
750 0x09, 0xee, 0xdc, 0x01, 751 0x09, 0xee, 0xdc, 0x01,
751 0x80, 0xee, 0xcc, 0x7d, 752 0x80, 0xee, 0xce, 0x7d,
752 0xff, 0x6a, 0xdc, 0x0d, 753 0xff, 0x6a, 0xdc, 0x0d,
753 0x01, 0x65, 0x32, 0x31, 754 0x01, 0x65, 0x32, 0x31,
754 0x0a, 0x93, 0x26, 0x01, 755 0x0a, 0x93, 0x26, 0x01,
755 0x00, 0x65, 0xa8, 0x46, 756 0x00, 0x65, 0xaa, 0x46,
756 0x81, 0x6a, 0xd8, 0x5e, 757 0x81, 0x6a, 0xda, 0x5e,
757 0x01, 0x0c, 0xd8, 0x7d, 758 0x01, 0x0c, 0xda, 0x7d,
758 0x04, 0x0c, 0xd6, 0x6d, 759 0x04, 0x0c, 0xd8, 0x6d,
759 0xe0, 0x03, 0x06, 0x08, 760 0xe0, 0x03, 0x06, 0x08,
760 0xe0, 0x03, 0x7e, 0x0c, 761 0xe0, 0x03, 0x7e, 0x0c,
761 0x01, 0x65, 0x18, 0x31, 762 0x01, 0x65, 0x18, 0x31,
@@ -774,7 +775,7 @@ static uint8_t seqprog[] = {
774 0x01, 0x6c, 0xda, 0x34, 775 0x01, 0x6c, 0xda, 0x34,
775 0x3d, 0x64, 0xa4, 0x28, 776 0x3d, 0x64, 0xa4, 0x28,
776 0x55, 0x64, 0xc8, 0x28, 777 0x55, 0x64, 0xc8, 0x28,
777 0x00, 0x65, 0x06, 0x46, 778 0x00, 0x65, 0x08, 0x46,
778 0x2e, 0x64, 0xa4, 0x28, 779 0x2e, 0x64, 0xa4, 0x28,
779 0x66, 0x64, 0xc8, 0x28, 780 0x66, 0x64, 0xc8, 0x28,
780 0x00, 0x6c, 0xda, 0x18, 781 0x00, 0x6c, 0xda, 0x18,
@@ -785,63 +786,63 @@ static uint8_t seqprog[] = {
785 0x00, 0x6c, 0xda, 0x24, 786 0x00, 0x6c, 0xda, 0x24,
786 0x01, 0x65, 0xc8, 0x30, 787 0x01, 0x65, 0xc8, 0x30,
787 0xe0, 0x6a, 0xcc, 0x00, 788 0xe0, 0x6a, 0xcc, 0x00,
788 0x44, 0x6a, 0x02, 0x5e, 789 0x44, 0x6a, 0x04, 0x5e,
789 0x01, 0x90, 0xe2, 0x31, 790 0x01, 0x90, 0xe2, 0x31,
790 0x04, 0x3b, 0x26, 0x7e, 791 0x04, 0x3b, 0x28, 0x7e,
791 0x30, 0x6a, 0xd0, 0x01, 792 0x30, 0x6a, 0xd0, 0x01,
792 0x20, 0x6a, 0xd0, 0x01, 793 0x20, 0x6a, 0xd0, 0x01,
793 0x1d, 0x6a, 0xdc, 0x01, 794 0x1d, 0x6a, 0xdc, 0x01,
794 0xdc, 0xee, 0x22, 0x66, 795 0xdc, 0xee, 0x24, 0x66,
795 0x00, 0x65, 0x3e, 0x46, 796 0x00, 0x65, 0x40, 0x46,
796 0x20, 0x6a, 0xd0, 0x01, 797 0x20, 0x6a, 0xd0, 0x01,
797 0x01, 0x6a, 0xdc, 0x01, 798 0x01, 0x6a, 0xdc, 0x01,
798 0x20, 0xa0, 0xd8, 0x31, 799 0x20, 0xa0, 0xd8, 0x31,
799 0x09, 0xee, 0xdc, 0x01, 800 0x09, 0xee, 0xdc, 0x01,
800 0x80, 0xee, 0x2e, 0x7e, 801 0x80, 0xee, 0x30, 0x7e,
801 0x11, 0x6a, 0xdc, 0x01, 802 0x11, 0x6a, 0xdc, 0x01,
802 0x50, 0xee, 0x32, 0x66, 803 0x50, 0xee, 0x34, 0x66,
803 0x20, 0x6a, 0xd0, 0x01, 804 0x20, 0x6a, 0xd0, 0x01,
804 0x09, 0x6a, 0xdc, 0x01, 805 0x09, 0x6a, 0xdc, 0x01,
805 0x88, 0xee, 0x38, 0x66, 806 0x88, 0xee, 0x3a, 0x66,
806 0x19, 0x6a, 0xdc, 0x01, 807 0x19, 0x6a, 0xdc, 0x01,
807 0xd8, 0xee, 0x3c, 0x66, 808 0xd8, 0xee, 0x3e, 0x66,
808 0xff, 0x6a, 0xdc, 0x09, 809 0xff, 0x6a, 0xdc, 0x09,
809 0x18, 0xee, 0x40, 0x6e, 810 0x18, 0xee, 0x42, 0x6e,
810 0xff, 0x6a, 0xd4, 0x0c, 811 0xff, 0x6a, 0xd4, 0x0c,
811 0x88, 0x6a, 0xcc, 0x00, 812 0x88, 0x6a, 0xcc, 0x00,
812 0x44, 0x6a, 0x02, 0x5e, 813 0x44, 0x6a, 0x04, 0x5e,
813 0x20, 0x6a, 0xe0, 0x5d, 814 0x20, 0x6a, 0xe2, 0x5d,
814 0x01, 0x3b, 0x26, 0x31, 815 0x01, 0x3b, 0x26, 0x31,
815 0x04, 0x3b, 0x5a, 0x6e, 816 0x04, 0x3b, 0x5c, 0x6e,
816 0xa0, 0x6a, 0xca, 0x00, 817 0xa0, 0x6a, 0xca, 0x00,
817 0x20, 0x65, 0xc8, 0x18, 818 0x20, 0x65, 0xc8, 0x18,
818 0x00, 0x65, 0x98, 0x5e, 819 0x00, 0x65, 0x9a, 0x5e,
819 0x00, 0x65, 0x52, 0x66, 820 0x00, 0x65, 0x54, 0x66,
820 0x0a, 0x93, 0x26, 0x01, 821 0x0a, 0x93, 0x26, 0x01,
821 0x00, 0x65, 0xa8, 0x46, 822 0x00, 0x65, 0xaa, 0x46,
822 0xa0, 0x6a, 0xcc, 0x00, 823 0xa0, 0x6a, 0xcc, 0x00,
823 0xff, 0x6a, 0xc8, 0x08, 824 0xff, 0x6a, 0xc8, 0x08,
824 0x20, 0x94, 0x5e, 0x6e, 825 0x20, 0x94, 0x60, 0x6e,
825 0x10, 0x94, 0x60, 0x6e, 826 0x10, 0x94, 0x62, 0x6e,
826 0x08, 0x94, 0x7a, 0x6e, 827 0x08, 0x94, 0x7c, 0x6e,
827 0x08, 0x94, 0x7a, 0x6e, 828 0x08, 0x94, 0x7c, 0x6e,
828 0x08, 0x94, 0x7a, 0x6e, 829 0x08, 0x94, 0x7c, 0x6e,
829 0xff, 0x8c, 0xc8, 0x10, 830 0xff, 0x8c, 0xc8, 0x10,
830 0xc1, 0x64, 0xc8, 0x18, 831 0xc1, 0x64, 0xc8, 0x18,
831 0xf8, 0x64, 0xc8, 0x08, 832 0xf8, 0x64, 0xc8, 0x08,
832 0x01, 0x99, 0xda, 0x30, 833 0x01, 0x99, 0xda, 0x30,
833 0x00, 0x66, 0x6e, 0x66, 834 0x00, 0x66, 0x70, 0x66,
834 0xc0, 0x66, 0xaa, 0x76, 835 0xc0, 0x66, 0xac, 0x76,
835 0x60, 0x66, 0xc8, 0x18, 836 0x60, 0x66, 0xc8, 0x18,
836 0x3d, 0x64, 0xc8, 0x28, 837 0x3d, 0x64, 0xc8, 0x28,
837 0x00, 0x65, 0x5e, 0x46, 838 0x00, 0x65, 0x60, 0x46,
838 0xf7, 0x93, 0x26, 0x09, 839 0xf7, 0x93, 0x26, 0x09,
839 0x08, 0x93, 0x7c, 0x6e, 840 0x08, 0x93, 0x7e, 0x6e,
840 0x00, 0x62, 0xc4, 0x18, 841 0x00, 0x62, 0xc4, 0x18,
841 0x00, 0x65, 0xa8, 0x5e, 842 0x00, 0x65, 0xaa, 0x5e,
842 0x00, 0x65, 0x88, 0x5e, 843 0x00, 0x65, 0x8a, 0x5e,
843 0x00, 0x65, 0x88, 0x5e, 844 0x00, 0x65, 0x8a, 0x5e,
844 0x00, 0x65, 0x88, 0x5e, 845 0x00, 0x65, 0x8a, 0x5e,
845 0x01, 0x99, 0xda, 0x30, 846 0x01, 0x99, 0xda, 0x30,
846 0x01, 0x99, 0xda, 0x30, 847 0x01, 0x99, 0xda, 0x30,
847 0x01, 0x99, 0xda, 0x30, 848 0x01, 0x99, 0xda, 0x30,
@@ -858,11 +859,11 @@ static uint8_t seqprog[] = {
858 0x01, 0x6c, 0x32, 0x31, 859 0x01, 0x6c, 0x32, 0x31,
859 0x01, 0x6c, 0x32, 0x31, 860 0x01, 0x6c, 0x32, 0x31,
860 0x01, 0x6c, 0x32, 0x35, 861 0x01, 0x6c, 0x32, 0x35,
861 0x08, 0x94, 0xa8, 0x7e, 862 0x08, 0x94, 0xaa, 0x7e,
862 0xf7, 0x93, 0x26, 0x09, 863 0xf7, 0x93, 0x26, 0x09,
863 0x08, 0x93, 0xac, 0x6e, 864 0x08, 0x93, 0xae, 0x6e,
864 0xff, 0x6a, 0xd4, 0x0c, 865 0xff, 0x6a, 0xd4, 0x0c,
865 0x04, 0xb8, 0xd4, 0x6e, 866 0x04, 0xb8, 0xd6, 0x6e,
866 0x01, 0x42, 0x7e, 0x31, 867 0x01, 0x42, 0x7e, 0x31,
867 0xff, 0x6a, 0x76, 0x01, 868 0xff, 0x6a, 0x76, 0x01,
868 0x01, 0x90, 0x84, 0x34, 869 0x01, 0x90, 0x84, 0x34,
@@ -870,14 +871,14 @@ static uint8_t seqprog[] = {
870 0x01, 0x85, 0x0a, 0x01, 871 0x01, 0x85, 0x0a, 0x01,
871 0x7f, 0x65, 0x10, 0x09, 872 0x7f, 0x65, 0x10, 0x09,
872 0xfe, 0x85, 0x0a, 0x0d, 873 0xfe, 0x85, 0x0a, 0x0d,
873 0xff, 0x42, 0xd0, 0x66, 874 0xff, 0x42, 0xd2, 0x66,
874 0xff, 0x41, 0xc8, 0x66, 875 0xff, 0x41, 0xca, 0x66,
875 0xd1, 0x6a, 0xd8, 0x5e, 876 0xd1, 0x6a, 0xda, 0x5e,
876 0xff, 0x6a, 0xca, 0x04, 877 0xff, 0x6a, 0xca, 0x04,
877 0x01, 0x41, 0x20, 0x31, 878 0x01, 0x41, 0x20, 0x31,
878 0x01, 0xbf, 0x82, 0x30, 879 0x01, 0xbf, 0x82, 0x30,
879 0x01, 0x6a, 0x76, 0x00, 880 0x01, 0x6a, 0x76, 0x00,
880 0x00, 0xbb, 0x12, 0x46, 881 0x00, 0xbb, 0x14, 0x46,
881 0x01, 0x42, 0x20, 0x31, 882 0x01, 0x42, 0x20, 0x31,
882 0x01, 0xbf, 0x84, 0x34, 883 0x01, 0xbf, 0x84, 0x34,
883 0x01, 0x41, 0x7e, 0x31, 884 0x01, 0x41, 0x7e, 0x31,
@@ -941,7 +942,7 @@ static ahc_patch_func_t ahc_patch17_func;
941static int 942static int
942ahc_patch17_func(struct ahc_softc *ahc) 943ahc_patch17_func(struct ahc_softc *ahc)
943{ 944{
944 return ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0); 945 return ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0);
945} 946}
946 947
947static ahc_patch_func_t ahc_patch16_func; 948static ahc_patch_func_t ahc_patch16_func;
@@ -1142,152 +1143,152 @@ static struct patch {
1142 { ahc_patch0_func, 196, 1, 1 }, 1143 { ahc_patch0_func, 196, 1, 1 },
1143 { ahc_patch9_func, 212, 6, 2 }, 1144 { ahc_patch9_func, 212, 6, 2 },
1144 { ahc_patch0_func, 218, 6, 1 }, 1145 { ahc_patch0_func, 218, 6, 1 },
1145 { ahc_patch8_func, 226, 20, 2 }, 1146 { ahc_patch8_func, 226, 21, 2 },
1146 { ahc_patch1_func, 241, 1, 1 }, 1147 { ahc_patch1_func, 241, 1, 1 },
1147 { ahc_patch1_func, 248, 1, 2 }, 1148 { ahc_patch1_func, 249, 1, 2 },
1148 { ahc_patch0_func, 249, 2, 2 }, 1149 { ahc_patch0_func, 250, 2, 2 },
1149 { ahc_patch11_func, 250, 1, 1 }, 1150 { ahc_patch11_func, 251, 1, 1 },
1150 { ahc_patch9_func, 258, 27, 3 }, 1151 { ahc_patch9_func, 259, 27, 3 },
1151 { ahc_patch1_func, 274, 10, 2 }, 1152 { ahc_patch1_func, 275, 10, 2 },
1152 { ahc_patch13_func, 277, 1, 1 }, 1153 { ahc_patch13_func, 278, 1, 1 },
1153 { ahc_patch14_func, 285, 14, 1 }, 1154 { ahc_patch14_func, 286, 14, 1 },
1154 { ahc_patch1_func, 301, 1, 2 }, 1155 { ahc_patch1_func, 302, 1, 2 },
1155 { ahc_patch0_func, 302, 1, 1 }, 1156 { ahc_patch0_func, 303, 1, 1 },
1156 { ahc_patch9_func, 305, 1, 1 }, 1157 { ahc_patch9_func, 306, 1, 1 },
1157 { ahc_patch13_func, 310, 1, 1 }, 1158 { ahc_patch13_func, 311, 1, 1 },
1158 { ahc_patch9_func, 311, 2, 2 }, 1159 { ahc_patch9_func, 312, 2, 2 },
1159 { ahc_patch0_func, 313, 4, 1 }, 1160 { ahc_patch0_func, 314, 4, 1 },
1160 { ahc_patch14_func, 317, 1, 1 }, 1161 { ahc_patch14_func, 318, 1, 1 },
1161 { ahc_patch15_func, 319, 2, 3 }, 1162 { ahc_patch15_func, 320, 2, 3 },
1162 { ahc_patch9_func, 319, 1, 2 }, 1163 { ahc_patch9_func, 320, 1, 2 },
1163 { ahc_patch0_func, 320, 1, 1 }, 1164 { ahc_patch0_func, 321, 1, 1 },
1164 { ahc_patch6_func, 325, 1, 2 }, 1165 { ahc_patch6_func, 326, 1, 2 },
1165 { ahc_patch0_func, 326, 1, 1 }, 1166 { ahc_patch0_func, 327, 1, 1 },
1166 { ahc_patch1_func, 330, 47, 11 }, 1167 { ahc_patch1_func, 331, 47, 11 },
1167 { ahc_patch6_func, 337, 2, 4 }, 1168 { ahc_patch6_func, 338, 2, 4 },
1168 { ahc_patch7_func, 337, 1, 1 }, 1169 { ahc_patch7_func, 338, 1, 1 },
1169 { ahc_patch8_func, 338, 1, 1 }, 1170 { ahc_patch8_func, 339, 1, 1 },
1170 { ahc_patch0_func, 339, 1, 1 }, 1171 { ahc_patch0_func, 340, 1, 1 },
1171 { ahc_patch16_func, 340, 1, 1 }, 1172 { ahc_patch16_func, 341, 1, 1 },
1172 { ahc_patch6_func, 356, 6, 3 }, 1173 { ahc_patch6_func, 357, 6, 3 },
1173 { ahc_patch16_func, 356, 5, 1 }, 1174 { ahc_patch16_func, 357, 5, 1 },
1174 { ahc_patch0_func, 362, 7, 1 }, 1175 { ahc_patch0_func, 363, 7, 1 },
1175 { ahc_patch13_func, 372, 5, 1 }, 1176 { ahc_patch13_func, 373, 5, 1 },
1176 { ahc_patch0_func, 377, 52, 17 }, 1177 { ahc_patch0_func, 378, 52, 17 },
1177 { ahc_patch14_func, 377, 1, 1 }, 1178 { ahc_patch14_func, 378, 1, 1 },
1178 { ahc_patch7_func, 379, 2, 2 }, 1179 { ahc_patch7_func, 380, 2, 2 },
1179 { ahc_patch17_func, 380, 1, 1 }, 1180 { ahc_patch17_func, 381, 1, 1 },
1180 { ahc_patch9_func, 383, 1, 1 }, 1181 { ahc_patch9_func, 384, 1, 1 },
1181 { ahc_patch18_func, 390, 1, 1 }, 1182 { ahc_patch18_func, 391, 1, 1 },
1182 { ahc_patch14_func, 395, 9, 3 }, 1183 { ahc_patch14_func, 396, 9, 3 },
1183 { ahc_patch9_func, 396, 3, 2 }, 1184 { ahc_patch9_func, 397, 3, 2 },
1184 { ahc_patch0_func, 399, 3, 1 }, 1185 { ahc_patch0_func, 400, 3, 1 },
1185 { ahc_patch9_func, 407, 6, 2 }, 1186 { ahc_patch9_func, 408, 6, 2 },
1186 { ahc_patch0_func, 413, 9, 2 }, 1187 { ahc_patch0_func, 414, 9, 2 },
1187 { ahc_patch13_func, 413, 1, 1 }, 1188 { ahc_patch13_func, 414, 1, 1 },
1188 { ahc_patch13_func, 422, 2, 1 }, 1189 { ahc_patch13_func, 423, 2, 1 },
1189 { ahc_patch14_func, 424, 1, 1 }, 1190 { ahc_patch14_func, 425, 1, 1 },
1190 { ahc_patch9_func, 426, 1, 2 }, 1191 { ahc_patch9_func, 427, 1, 2 },
1191 { ahc_patch0_func, 427, 1, 1 }, 1192 { ahc_patch0_func, 428, 1, 1 },
1192 { ahc_patch7_func, 428, 1, 1 },
1193 { ahc_patch7_func, 429, 1, 1 }, 1193 { ahc_patch7_func, 429, 1, 1 },
1194 { ahc_patch8_func, 430, 3, 3 }, 1194 { ahc_patch7_func, 430, 1, 1 },
1195 { ahc_patch6_func, 431, 1, 2 }, 1195 { ahc_patch8_func, 431, 3, 3 },
1196 { ahc_patch0_func, 432, 1, 1 }, 1196 { ahc_patch6_func, 432, 1, 2 },
1197 { ahc_patch9_func, 433, 1, 1 }, 1197 { ahc_patch0_func, 433, 1, 1 },
1198 { ahc_patch15_func, 434, 1, 2 }, 1198 { ahc_patch9_func, 434, 1, 1 },
1199 { ahc_patch13_func, 434, 1, 1 }, 1199 { ahc_patch15_func, 435, 1, 2 },
1200 { ahc_patch14_func, 436, 9, 4 }, 1200 { ahc_patch13_func, 435, 1, 1 },
1201 { ahc_patch9_func, 436, 1, 1 }, 1201 { ahc_patch14_func, 437, 9, 4 },
1202 { ahc_patch9_func, 443, 2, 1 }, 1202 { ahc_patch9_func, 437, 1, 1 },
1203 { ahc_patch0_func, 445, 4, 3 }, 1203 { ahc_patch9_func, 444, 2, 1 },
1204 { ahc_patch9_func, 445, 1, 2 }, 1204 { ahc_patch0_func, 446, 4, 3 },
1205 { ahc_patch0_func, 446, 3, 1 }, 1205 { ahc_patch9_func, 446, 1, 2 },
1206 { ahc_patch1_func, 450, 2, 1 }, 1206 { ahc_patch0_func, 447, 3, 1 },
1207 { ahc_patch7_func, 452, 10, 2 }, 1207 { ahc_patch1_func, 451, 2, 1 },
1208 { ahc_patch0_func, 462, 1, 1 }, 1208 { ahc_patch7_func, 453, 10, 2 },
1209 { ahc_patch8_func, 463, 118, 22 }, 1209 { ahc_patch0_func, 463, 1, 1 },
1210 { ahc_patch1_func, 465, 3, 2 }, 1210 { ahc_patch8_func, 464, 118, 22 },
1211 { ahc_patch0_func, 468, 5, 3 }, 1211 { ahc_patch1_func, 466, 3, 2 },
1212 { ahc_patch9_func, 468, 2, 2 }, 1212 { ahc_patch0_func, 469, 5, 3 },
1213 { ahc_patch0_func, 470, 3, 1 }, 1213 { ahc_patch9_func, 469, 2, 2 },
1214 { ahc_patch1_func, 475, 2, 2 }, 1214 { ahc_patch0_func, 471, 3, 1 },
1215 { ahc_patch0_func, 477, 6, 3 }, 1215 { ahc_patch1_func, 476, 2, 2 },
1216 { ahc_patch9_func, 477, 2, 2 }, 1216 { ahc_patch0_func, 478, 6, 3 },
1217 { ahc_patch0_func, 479, 3, 1 }, 1217 { ahc_patch9_func, 478, 2, 2 },
1218 { ahc_patch1_func, 485, 2, 2 }, 1218 { ahc_patch0_func, 480, 3, 1 },
1219 { ahc_patch0_func, 487, 9, 7 }, 1219 { ahc_patch1_func, 486, 2, 2 },
1220 { ahc_patch9_func, 487, 5, 6 }, 1220 { ahc_patch0_func, 488, 9, 7 },
1221 { ahc_patch19_func, 487, 1, 2 }, 1221 { ahc_patch9_func, 488, 5, 6 },
1222 { ahc_patch0_func, 488, 1, 1 }, 1222 { ahc_patch19_func, 488, 1, 2 },
1223 { ahc_patch19_func, 490, 1, 2 }, 1223 { ahc_patch0_func, 489, 1, 1 },
1224 { ahc_patch0_func, 491, 1, 1 }, 1224 { ahc_patch19_func, 491, 1, 2 },
1225 { ahc_patch0_func, 492, 4, 1 }, 1225 { ahc_patch0_func, 492, 1, 1 },
1226 { ahc_patch6_func, 497, 3, 2 }, 1226 { ahc_patch0_func, 493, 4, 1 },
1227 { ahc_patch0_func, 500, 1, 1 }, 1227 { ahc_patch6_func, 498, 3, 2 },
1228 { ahc_patch6_func, 510, 1, 2 }, 1228 { ahc_patch0_func, 501, 1, 1 },
1229 { ahc_patch0_func, 511, 1, 1 }, 1229 { ahc_patch6_func, 511, 1, 2 },
1230 { ahc_patch20_func, 548, 7, 1 }, 1230 { ahc_patch0_func, 512, 1, 1 },
1231 { ahc_patch3_func, 583, 1, 2 }, 1231 { ahc_patch20_func, 549, 7, 1 },
1232 { ahc_patch0_func, 584, 1, 1 }, 1232 { ahc_patch3_func, 584, 1, 2 },
1233 { ahc_patch21_func, 587, 1, 1 }, 1233 { ahc_patch0_func, 585, 1, 1 },
1234 { ahc_patch8_func, 589, 106, 33 }, 1234 { ahc_patch21_func, 588, 1, 1 },
1235 { ahc_patch4_func, 591, 1, 1 }, 1235 { ahc_patch8_func, 590, 106, 33 },
1236 { ahc_patch1_func, 597, 2, 2 }, 1236 { ahc_patch4_func, 592, 1, 1 },
1237 { ahc_patch0_func, 599, 1, 1 }, 1237 { ahc_patch1_func, 598, 2, 2 },
1238 { ahc_patch1_func, 602, 1, 2 }, 1238 { ahc_patch0_func, 600, 1, 1 },
1239 { ahc_patch0_func, 603, 1, 1 }, 1239 { ahc_patch1_func, 603, 1, 2 },
1240 { ahc_patch9_func, 604, 3, 3 }, 1240 { ahc_patch0_func, 604, 1, 1 },
1241 { ahc_patch15_func, 605, 1, 1 }, 1241 { ahc_patch9_func, 605, 3, 3 },
1242 { ahc_patch0_func, 607, 4, 1 }, 1242 { ahc_patch15_func, 606, 1, 1 },
1243 { ahc_patch19_func, 616, 2, 2 }, 1243 { ahc_patch0_func, 608, 4, 1 },
1244 { ahc_patch0_func, 618, 1, 1 }, 1244 { ahc_patch19_func, 617, 2, 2 },
1245 { ahc_patch19_func, 622, 10, 3 }, 1245 { ahc_patch0_func, 619, 1, 1 },
1246 { ahc_patch5_func, 624, 8, 1 }, 1246 { ahc_patch19_func, 623, 10, 3 },
1247 { ahc_patch0_func, 632, 9, 2 }, 1247 { ahc_patch5_func, 625, 8, 1 },
1248 { ahc_patch5_func, 633, 8, 1 }, 1248 { ahc_patch0_func, 633, 9, 2 },
1249 { ahc_patch4_func, 643, 1, 2 }, 1249 { ahc_patch5_func, 634, 8, 1 },
1250 { ahc_patch0_func, 644, 1, 1 }, 1250 { ahc_patch4_func, 644, 1, 2 },
1251 { ahc_patch19_func, 645, 1, 2 }, 1251 { ahc_patch0_func, 645, 1, 1 },
1252 { ahc_patch0_func, 646, 3, 2 }, 1252 { ahc_patch19_func, 646, 1, 2 },
1253 { ahc_patch4_func, 648, 1, 1 }, 1253 { ahc_patch0_func, 647, 3, 2 },
1254 { ahc_patch5_func, 649, 1, 1 }, 1254 { ahc_patch4_func, 649, 1, 1 },
1255 { ahc_patch5_func, 652, 1, 1 }, 1255 { ahc_patch5_func, 650, 1, 1 },
1256 { ahc_patch5_func, 654, 1, 1 }, 1256 { ahc_patch5_func, 653, 1, 1 },
1257 { ahc_patch4_func, 656, 2, 2 }, 1257 { ahc_patch5_func, 655, 1, 1 },
1258 { ahc_patch0_func, 658, 2, 1 }, 1258 { ahc_patch4_func, 657, 2, 2 },
1259 { ahc_patch5_func, 660, 1, 1 }, 1259 { ahc_patch0_func, 659, 2, 1 },
1260 { ahc_patch5_func, 663, 1, 1 }, 1260 { ahc_patch5_func, 661, 1, 1 },
1261 { ahc_patch5_func, 666, 1, 1 }, 1261 { ahc_patch5_func, 664, 1, 1 },
1262 { ahc_patch19_func, 670, 1, 1 }, 1262 { ahc_patch5_func, 667, 1, 1 },
1263 { ahc_patch19_func, 673, 1, 1 }, 1263 { ahc_patch19_func, 671, 1, 1 },
1264 { ahc_patch4_func, 679, 1, 1 }, 1264 { ahc_patch19_func, 674, 1, 1 },
1265 { ahc_patch6_func, 682, 1, 2 }, 1265 { ahc_patch4_func, 680, 1, 1 },
1266 { ahc_patch0_func, 683, 1, 1 }, 1266 { ahc_patch6_func, 683, 1, 2 },
1267 { ahc_patch7_func, 695, 16, 1 }, 1267 { ahc_patch0_func, 684, 1, 1 },
1268 { ahc_patch4_func, 711, 20, 1 }, 1268 { ahc_patch7_func, 696, 16, 1 },
1269 { ahc_patch9_func, 732, 4, 2 }, 1269 { ahc_patch4_func, 712, 20, 1 },
1270 { ahc_patch0_func, 736, 4, 1 }, 1270 { ahc_patch9_func, 733, 4, 2 },
1271 { ahc_patch9_func, 740, 4, 2 }, 1271 { ahc_patch0_func, 737, 4, 1 },
1272 { ahc_patch0_func, 744, 3, 1 }, 1272 { ahc_patch9_func, 741, 4, 2 },
1273 { ahc_patch6_func, 750, 1, 1 }, 1273 { ahc_patch0_func, 745, 3, 1 },
1274 { ahc_patch22_func, 752, 14, 1 }, 1274 { ahc_patch6_func, 751, 1, 1 },
1275 { ahc_patch7_func, 766, 3, 1 }, 1275 { ahc_patch22_func, 753, 14, 1 },
1276 { ahc_patch9_func, 778, 24, 8 }, 1276 { ahc_patch7_func, 767, 3, 1 },
1277 { ahc_patch19_func, 782, 1, 2 }, 1277 { ahc_patch9_func, 779, 24, 8 },
1278 { ahc_patch0_func, 783, 1, 1 }, 1278 { ahc_patch19_func, 783, 1, 2 },
1279 { ahc_patch15_func, 788, 4, 2 }, 1279 { ahc_patch0_func, 784, 1, 1 },
1280 { ahc_patch0_func, 792, 7, 3 }, 1280 { ahc_patch15_func, 789, 4, 2 },
1281 { ahc_patch23_func, 792, 5, 2 }, 1281 { ahc_patch0_func, 793, 7, 3 },
1282 { ahc_patch0_func, 797, 2, 1 }, 1282 { ahc_patch23_func, 793, 5, 2 },
1283 { ahc_patch0_func, 802, 42, 3 }, 1283 { ahc_patch0_func, 798, 2, 1 },
1284 { ahc_patch18_func, 814, 18, 2 }, 1284 { ahc_patch0_func, 803, 42, 3 },
1285 { ahc_patch0_func, 832, 1, 1 }, 1285 { ahc_patch18_func, 815, 18, 2 },
1286 { ahc_patch4_func, 856, 1, 1 }, 1286 { ahc_patch0_func, 833, 1, 1 },
1287 { ahc_patch4_func, 857, 3, 2 }, 1287 { ahc_patch4_func, 857, 1, 1 },
1288 { ahc_patch0_func, 860, 1, 1 }, 1288 { ahc_patch4_func, 858, 3, 2 },
1289 { ahc_patch13_func, 861, 3, 1 }, 1289 { ahc_patch0_func, 861, 1, 1 },
1290 { ahc_patch4_func, 864, 12, 1 } 1290 { ahc_patch13_func, 862, 3, 1 },
1291 { ahc_patch4_func, 865, 12, 1 }
1291}; 1292};
1292 1293
1293static struct cs { 1294static struct cs {
@@ -1296,11 +1297,11 @@ static struct cs {
1296} critical_sections[] = { 1297} critical_sections[] = {
1297 { 11, 18 }, 1298 { 11, 18 },
1298 { 21, 30 }, 1299 { 21, 30 },
1299 { 711, 727 }, 1300 { 712, 728 },
1300 { 857, 860 }, 1301 { 858, 861 },
1301 { 864, 870 }, 1302 { 865, 871 },
1302 { 872, 874 }, 1303 { 873, 875 },
1303 { 874, 876 } 1304 { 875, 877 }
1304}; 1305};
1305 1306
1306static const int num_critical_sections = sizeof(critical_sections) 1307static const int num_critical_sections = sizeof(critical_sections)
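 
The +1 shifts running through the patch table above (and the matching 2-byte shifts in the seqprog jump targets) are consistent with the downloaded sequencer program gaining a single instruction, which moves every later offset by one. For orientation only, here is a minimal sketch of how a table of { patch_func, begin, skip_instr, skip_patch } entries can be consumed when the firmware image is assembled for download. The names below (struct patch_entry, apply_patches, ctx) are illustrative assumptions, not the driver's API; the real loader (ahc_loadseq()/ahc_check_patch() in aic7xxx_core.c) may differ in detail.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative sketch only: hypothetical names, not the driver's API.
 * Each entry guards a block of sequencer instructions that is downloaded
 * only when its predicate reports the corresponding option as enabled.
 */
struct patch_entry {
	int      (*patch_func)(void *ctx); /* nonzero => option enabled        */
	unsigned   begin;                  /* first instruction of the block   */
	unsigned   skip_instr;             /* instructions dropped if disabled */
	unsigned   skip_patch;             /* nested patches dropped with them */
};

/* Copy the enabled instructions (4 bytes each) into out[]; return the count. */
static size_t
apply_patches(void *ctx, const uint8_t *prog, size_t num_instr,
	      const struct patch_entry *patches, size_t num_patches,
	      uint8_t *out)
{
	size_t i = 0, p = 0, emitted = 0;

	while (i < num_instr) {
		int skipped = 0;

		/* Several patches may begin at the same instruction index. */
		while (p < num_patches && patches[p].begin == i) {
			if (patches[p].patch_func(ctx) == 0) {
				/* Option disabled: drop this block and any
				 * patches that apply only inside it. */
				i += patches[p].skip_instr;
				p += patches[p].skip_patch;
				skipped = 1;
				break;
			}
			p++;	/* option enabled; keep checking this index */
		}
		if (skipped)
			continue;

		memcpy(&out[emitted * 4], &prog[i * 4], 4);
		emitted++;
		i++;
	}
	return emitted;
}

Under a scheme like this, inserting one instruction into an always-downloaded region pushes the begin offset of every later patch up by one, which matches the pattern of changes in the table above; the critical_sections ranges move the same way because they are expressed in the same instruction indices.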
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
index 7c5a6db0e672..828ae3d9a510 100644
--- a/drivers/scsi/aic7xxx/aiclib.c
+++ b/drivers/scsi/aic7xxx/aiclib.c
@@ -30,1382 +30,5 @@
30 * $Id$ 30 * $Id$
31 */ 31 */
32 32
33#include <linux/blkdev.h>
34#include <linux/delay.h>
35#include <linux/version.h>
36
37/* Core SCSI definitions */
38#include <scsi/scsi_host.h>
39#include "aiclib.h" 33#include "aiclib.h"
40#include "cam.h"
41
42#ifndef FALSE
43#define FALSE 0
44#endif /* FALSE */
45#ifndef TRUE
46#define TRUE 1
47#endif /* TRUE */
48#ifndef ERESTART
49#define ERESTART -1 /* restart syscall */
50#endif
51#ifndef EJUSTRETURN
52#define EJUSTRETURN -2 /* don't modify regs, just return */
53#endif
54
55static int ascentrycomp(const void *key, const void *member);
56static int senseentrycomp(const void *key, const void *member);
57static void fetchtableentries(int sense_key, int asc, int ascq,
58 struct scsi_inquiry_data *,
59 const struct sense_key_table_entry **,
60 const struct asc_table_entry **);
61static void * scsibsearch(const void *key, const void *base, size_t nmemb,
62 size_t size,
63 int (*compar)(const void *, const void *));
64typedef int (cam_quirkmatch_t)(caddr_t, caddr_t);
65static int cam_strmatch(const u_int8_t *str, const u_int8_t *pattern,
66 int str_len);
67static caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table,
68 int num_entries, int entry_size,
69 cam_quirkmatch_t *comp_func);
70
71#define SCSI_NO_SENSE_STRINGS 1
72#if !defined(SCSI_NO_SENSE_STRINGS)
73#define SST(asc, ascq, action, desc) \
74 asc, ascq, action, desc
75#else
76static const char empty_string[] = "";
77
78#define SST(asc, ascq, action, desc) \
79 asc, ascq, action, empty_string
80#endif
81
82static const struct sense_key_table_entry sense_key_table[] =
83{
84 { SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" },
85 { SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" },
86 {
87 SSD_KEY_NOT_READY, SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
88 "NOT READY"
89 },
90 { SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" },
91 { SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" },
92 { SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" },
93 { SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" },
94 { SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" },
95 { SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" },
96 { SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" },
97 { SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" },
98 { SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" },
99 { SSD_KEY_EQUAL, SS_NOP, "EQUAL" },
100 { SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" },
101 { SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" },
102 { SSD_KEY_RESERVED, SS_FATAL|EIO, "RESERVED" }
103};
104
105static const int sense_key_table_size =
106 sizeof(sense_key_table)/sizeof(sense_key_table[0]);
107
108static struct asc_table_entry quantum_fireball_entries[] = {
109 {SST(0x04, 0x0b, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
110 "Logical unit not ready, initializing cmd. required")}
111};
112
113static struct asc_table_entry sony_mo_entries[] = {
114 {SST(0x04, 0x00, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
115 "Logical unit not ready, cause not reportable")}
116};
117
118static struct scsi_sense_quirk_entry sense_quirk_table[] = {
119 {
120 /*
121 * The Quantum Fireball ST and SE like to return 0x04 0x0b when
122 * they really should return 0x04 0x02. 0x04,0x0b isn't
123 * defined in any SCSI spec, and it isn't mentioned in the
124 * hardware manual for these drives.
125 */
126 {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"},
127 /*num_sense_keys*/0,
128 sizeof(quantum_fireball_entries)/sizeof(struct asc_table_entry),
129 /*sense key entries*/NULL,
130 quantum_fireball_entries
131 },
132 {
133 /*
134 * This Sony MO drive likes to return 0x04, 0x00 when it
135 * isn't spun up.
136 */
137 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"},
138 /*num_sense_keys*/0,
139 sizeof(sony_mo_entries)/sizeof(struct asc_table_entry),
140 /*sense key entries*/NULL,
141 sony_mo_entries
142 }
143};
144
145static const int sense_quirk_table_size =
146 sizeof(sense_quirk_table)/sizeof(sense_quirk_table[0]);
147
148static struct asc_table_entry asc_table[] = {
149/*
150 * From File: ASC-NUM.TXT
151 * SCSI ASC/ASCQ Assignments
152 * Numeric Sorted Listing
153 * as of 5/12/97
154 *
155 * D - DIRECT ACCESS DEVICE (SBC) device column key
156 * .T - SEQUENTIAL ACCESS DEVICE (SSC) -------------------
157 * . L - PRINTER DEVICE (SSC) blank = reserved
158 * . P - PROCESSOR DEVICE (SPC) not blank = allowed
159 * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC)
160 * . . R - CD DEVICE (MMC)
161 * . . S - SCANNER DEVICE (SGC)
162 * . . .O - OPTICAL MEMORY DEVICE (SBC)
163 * . . . M - MEDIA CHANGER DEVICE (SMC)
164 * . . . C - COMMUNICATION DEVICE (SSC)
165 * . . . .A - STORAGE ARRAY DEVICE (SCC)
166 * . . . . E - ENCLOSURE SERVICES DEVICE (SES)
167 * DTLPWRSOMCAE ASC ASCQ Action Description
168 * ------------ ---- ---- ------ -----------------------------------*/
169/* DTLPWRSOMCAE */{SST(0x00, 0x00, SS_NOP,
170 "No additional sense information") },
171/* T S */{SST(0x00, 0x01, SS_RDEF,
172 "Filemark detected") },
173/* T S */{SST(0x00, 0x02, SS_RDEF,
174 "End-of-partition/medium detected") },
175/* T */{SST(0x00, 0x03, SS_RDEF,
176 "Setmark detected") },
177/* T S */{SST(0x00, 0x04, SS_RDEF,
178 "Beginning-of-partition/medium detected") },
179/* T S */{SST(0x00, 0x05, SS_RDEF,
180 "End-of-data detected") },
181/* DTLPWRSOMCAE */{SST(0x00, 0x06, SS_RDEF,
182 "I/O process terminated") },
183/* R */{SST(0x00, 0x11, SS_FATAL|EBUSY,
184 "Audio play operation in progress") },
185/* R */{SST(0x00, 0x12, SS_NOP,
186 "Audio play operation paused") },
187/* R */{SST(0x00, 0x13, SS_NOP,
188 "Audio play operation successfully completed") },
189/* R */{SST(0x00, 0x14, SS_RDEF,
190 "Audio play operation stopped due to error") },
191/* R */{SST(0x00, 0x15, SS_NOP,
192 "No current audio status to return") },
193/* DTLPWRSOMCAE */{SST(0x00, 0x16, SS_FATAL|EBUSY,
194 "Operation in progress") },
195/* DTL WRSOM AE */{SST(0x00, 0x17, SS_RDEF,
196 "Cleaning requested") },
197/* D W O */{SST(0x01, 0x00, SS_RDEF,
198 "No index/sector signal") },
199/* D WR OM */{SST(0x02, 0x00, SS_RDEF,
200 "No seek complete") },
201/* DTL W SO */{SST(0x03, 0x00, SS_RDEF,
202 "Peripheral device write fault") },
203/* T */{SST(0x03, 0x01, SS_RDEF,
204 "No write current") },
205/* T */{SST(0x03, 0x02, SS_RDEF,
206 "Excessive write errors") },
207/* DTLPWRSOMCAE */{SST(0x04, 0x00,
208 SS_TUR|SSQ_DELAY|SSQ_MANY|SSQ_DECREMENT_COUNT|EIO,
209 "Logical unit not ready, cause not reportable") },
210/* DTLPWRSOMCAE */{SST(0x04, 0x01,
211 SS_TUR|SSQ_DELAY|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
212 "Logical unit is in process of becoming ready") },
213/* DTLPWRSOMCAE */{SST(0x04, 0x02, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
214 "Logical unit not ready, initializing cmd. required") },
215/* DTLPWRSOMCAE */{SST(0x04, 0x03, SS_FATAL|ENXIO,
216 "Logical unit not ready, manual intervention required")},
217/* DTL O */{SST(0x04, 0x04, SS_FATAL|EBUSY,
218 "Logical unit not ready, format in progress") },
219/* DT W OMCA */{SST(0x04, 0x05, SS_FATAL|EBUSY,
220 "Logical unit not ready, rebuild in progress") },
221/* DT W OMCA */{SST(0x04, 0x06, SS_FATAL|EBUSY,
222 "Logical unit not ready, recalculation in progress") },
223/* DTLPWRSOMCAE */{SST(0x04, 0x07, SS_FATAL|EBUSY,
224 "Logical unit not ready, operation in progress") },
225/* R */{SST(0x04, 0x08, SS_FATAL|EBUSY,
226 "Logical unit not ready, long write in progress") },
227/* DTL WRSOMCAE */{SST(0x05, 0x00, SS_RDEF,
228 "Logical unit does not respond to selection") },
229/* D WR OM */{SST(0x06, 0x00, SS_RDEF,
230 "No reference position found") },
231/* DTL WRSOM */{SST(0x07, 0x00, SS_RDEF,
232 "Multiple peripheral devices selected") },
233/* DTL WRSOMCAE */{SST(0x08, 0x00, SS_RDEF,
234 "Logical unit communication failure") },
235/* DTL WRSOMCAE */{SST(0x08, 0x01, SS_RDEF,
236 "Logical unit communication time-out") },
237/* DTL WRSOMCAE */{SST(0x08, 0x02, SS_RDEF,
238 "Logical unit communication parity error") },
239/* DT R OM */{SST(0x08, 0x03, SS_RDEF,
240 "Logical unit communication crc error (ultra-dma/32)")},
241/* DT WR O */{SST(0x09, 0x00, SS_RDEF,
242 "Track following error") },
243/* WR O */{SST(0x09, 0x01, SS_RDEF,
244 "Tracking servo failure") },
245/* WR O */{SST(0x09, 0x02, SS_RDEF,
246 "Focus servo failure") },
247/* WR O */{SST(0x09, 0x03, SS_RDEF,
248 "Spindle servo failure") },
249/* DT WR O */{SST(0x09, 0x04, SS_RDEF,
250 "Head select fault") },
251/* DTLPWRSOMCAE */{SST(0x0A, 0x00, SS_FATAL|ENOSPC,
252 "Error log overflow") },
253/* DTLPWRSOMCAE */{SST(0x0B, 0x00, SS_RDEF,
254 "Warning") },
255/* DTLPWRSOMCAE */{SST(0x0B, 0x01, SS_RDEF,
256 "Specified temperature exceeded") },
257/* DTLPWRSOMCAE */{SST(0x0B, 0x02, SS_RDEF,
258 "Enclosure degraded") },
259/* T RS */{SST(0x0C, 0x00, SS_RDEF,
260 "Write error") },
261/* D W O */{SST(0x0C, 0x01, SS_NOP|SSQ_PRINT_SENSE,
262 "Write error - recovered with auto reallocation") },
263/* D W O */{SST(0x0C, 0x02, SS_RDEF,
264 "Write error - auto reallocation failed") },
265/* D W O */{SST(0x0C, 0x03, SS_RDEF,
266 "Write error - recommend reassignment") },
267/* DT W O */{SST(0x0C, 0x04, SS_RDEF,
268 "Compression check miscompare error") },
269/* DT W O */{SST(0x0C, 0x05, SS_RDEF,
270 "Data expansion occurred during compression") },
271/* DT W O */{SST(0x0C, 0x06, SS_RDEF,
272 "Block not compressible") },
273/* R */{SST(0x0C, 0x07, SS_RDEF,
274 "Write error - recovery needed") },
275/* R */{SST(0x0C, 0x08, SS_RDEF,
276 "Write error - recovery failed") },
277/* R */{SST(0x0C, 0x09, SS_RDEF,
278 "Write error - loss of streaming") },
279/* R */{SST(0x0C, 0x0A, SS_RDEF,
280 "Write error - padding blocks added") },
281/* D W O */{SST(0x10, 0x00, SS_RDEF,
282 "ID CRC or ECC error") },
283/* DT WRSO */{SST(0x11, 0x00, SS_RDEF,
284 "Unrecovered read error") },
285/* DT W SO */{SST(0x11, 0x01, SS_RDEF,
286 "Read retries exhausted") },
287/* DT W SO */{SST(0x11, 0x02, SS_RDEF,
288 "Error too long to correct") },
289/* DT W SO */{SST(0x11, 0x03, SS_RDEF,
290 "Multiple read errors") },
291/* D W O */{SST(0x11, 0x04, SS_RDEF,
292 "Unrecovered read error - auto reallocate failed") },
293/* WR O */{SST(0x11, 0x05, SS_RDEF,
294 "L-EC uncorrectable error") },
295/* WR O */{SST(0x11, 0x06, SS_RDEF,
296 "CIRC unrecovered error") },
297/* W O */{SST(0x11, 0x07, SS_RDEF,
298 "Data re-synchronization error") },
299/* T */{SST(0x11, 0x08, SS_RDEF,
300 "Incomplete block read") },
301/* T */{SST(0x11, 0x09, SS_RDEF,
302 "No gap found") },
303/* DT O */{SST(0x11, 0x0A, SS_RDEF,
304 "Miscorrected error") },
305/* D W O */{SST(0x11, 0x0B, SS_RDEF,
306 "Unrecovered read error - recommend reassignment") },
307/* D W O */{SST(0x11, 0x0C, SS_RDEF,
308 "Unrecovered read error - recommend rewrite the data")},
309/* DT WR O */{SST(0x11, 0x0D, SS_RDEF,
310 "De-compression CRC error") },
311/* DT WR O */{SST(0x11, 0x0E, SS_RDEF,
312 "Cannot decompress using declared algorithm") },
313/* R */{SST(0x11, 0x0F, SS_RDEF,
314 "Error reading UPC/EAN number") },
315/* R */{SST(0x11, 0x10, SS_RDEF,
316 "Error reading ISRC number") },
317/* R */{SST(0x11, 0x11, SS_RDEF,
318 "Read error - loss of streaming") },
319/* D W O */{SST(0x12, 0x00, SS_RDEF,
320 "Address mark not found for id field") },
321/* D W O */{SST(0x13, 0x00, SS_RDEF,
322 "Address mark not found for data field") },
323/* DTL WRSO */{SST(0x14, 0x00, SS_RDEF,
324 "Recorded entity not found") },
325/* DT WR O */{SST(0x14, 0x01, SS_RDEF,
326 "Record not found") },
327/* T */{SST(0x14, 0x02, SS_RDEF,
328 "Filemark or setmark not found") },
329/* T */{SST(0x14, 0x03, SS_RDEF,
330 "End-of-data not found") },
331/* T */{SST(0x14, 0x04, SS_RDEF,
332 "Block sequence error") },
333/* DT W O */{SST(0x14, 0x05, SS_RDEF,
334 "Record not found - recommend reassignment") },
335/* DT W O */{SST(0x14, 0x06, SS_RDEF,
336 "Record not found - data auto-reallocated") },
337/* DTL WRSOM */{SST(0x15, 0x00, SS_RDEF,
338 "Random positioning error") },
339/* DTL WRSOM */{SST(0x15, 0x01, SS_RDEF,
340 "Mechanical positioning error") },
341/* DT WR O */{SST(0x15, 0x02, SS_RDEF,
342 "Positioning error detected by read of medium") },
343/* D W O */{SST(0x16, 0x00, SS_RDEF,
344 "Data synchronization mark error") },
345/* D W O */{SST(0x16, 0x01, SS_RDEF,
346 "Data sync error - data rewritten") },
347/* D W O */{SST(0x16, 0x02, SS_RDEF,
348 "Data sync error - recommend rewrite") },
349/* D W O */{SST(0x16, 0x03, SS_NOP|SSQ_PRINT_SENSE,
350 "Data sync error - data auto-reallocated") },
351/* D W O */{SST(0x16, 0x04, SS_RDEF,
352 "Data sync error - recommend reassignment") },
353/* DT WRSO */{SST(0x17, 0x00, SS_NOP|SSQ_PRINT_SENSE,
354 "Recovered data with no error correction applied") },
355/* DT WRSO */{SST(0x17, 0x01, SS_NOP|SSQ_PRINT_SENSE,
356 "Recovered data with retries") },
357/* DT WR O */{SST(0x17, 0x02, SS_NOP|SSQ_PRINT_SENSE,
358 "Recovered data with positive head offset") },
359/* DT WR O */{SST(0x17, 0x03, SS_NOP|SSQ_PRINT_SENSE,
360 "Recovered data with negative head offset") },
361/* WR O */{SST(0x17, 0x04, SS_NOP|SSQ_PRINT_SENSE,
362 "Recovered data with retries and/or CIRC applied") },
363/* D WR O */{SST(0x17, 0x05, SS_NOP|SSQ_PRINT_SENSE,
364 "Recovered data using previous sector id") },
365/* D W O */{SST(0x17, 0x06, SS_NOP|SSQ_PRINT_SENSE,
366 "Recovered data without ECC - data auto-reallocated") },
367/* D W O */{SST(0x17, 0x07, SS_NOP|SSQ_PRINT_SENSE,
368 "Recovered data without ECC - recommend reassignment")},
369/* D W O */{SST(0x17, 0x08, SS_NOP|SSQ_PRINT_SENSE,
370 "Recovered data without ECC - recommend rewrite") },
371/* D W O */{SST(0x17, 0x09, SS_NOP|SSQ_PRINT_SENSE,
372 "Recovered data without ECC - data rewritten") },
373/* D W O */{SST(0x18, 0x00, SS_NOP|SSQ_PRINT_SENSE,
374 "Recovered data with error correction applied") },
375/* D WR O */{SST(0x18, 0x01, SS_NOP|SSQ_PRINT_SENSE,
376 "Recovered data with error corr. & retries applied") },
377/* D WR O */{SST(0x18, 0x02, SS_NOP|SSQ_PRINT_SENSE,
378 "Recovered data - data auto-reallocated") },
379/* R */{SST(0x18, 0x03, SS_NOP|SSQ_PRINT_SENSE,
380 "Recovered data with CIRC") },
381/* R */{SST(0x18, 0x04, SS_NOP|SSQ_PRINT_SENSE,
382 "Recovered data with L-EC") },
383/* D WR O */{SST(0x18, 0x05, SS_NOP|SSQ_PRINT_SENSE,
384 "Recovered data - recommend reassignment") },
385/* D WR O */{SST(0x18, 0x06, SS_NOP|SSQ_PRINT_SENSE,
386 "Recovered data - recommend rewrite") },
387/* D W O */{SST(0x18, 0x07, SS_NOP|SSQ_PRINT_SENSE,
388 "Recovered data with ECC - data rewritten") },
389/* D O */{SST(0x19, 0x00, SS_RDEF,
390 "Defect list error") },
391/* D O */{SST(0x19, 0x01, SS_RDEF,
392 "Defect list not available") },
393/* D O */{SST(0x19, 0x02, SS_RDEF,
394 "Defect list error in primary list") },
395/* D O */{SST(0x19, 0x03, SS_RDEF,
396 "Defect list error in grown list") },
397/* DTLPWRSOMCAE */{SST(0x1A, 0x00, SS_RDEF,
398 "Parameter list length error") },
399/* DTLPWRSOMCAE */{SST(0x1B, 0x00, SS_RDEF,
400 "Synchronous data transfer error") },
401/* D O */{SST(0x1C, 0x00, SS_RDEF,
402 "Defect list not found") },
403/* D O */{SST(0x1C, 0x01, SS_RDEF,
404 "Primary defect list not found") },
405/* D O */{SST(0x1C, 0x02, SS_RDEF,
406 "Grown defect list not found") },
407/* D W O */{SST(0x1D, 0x00, SS_FATAL,
408 "Miscompare during verify operation" )},
409/* D W O */{SST(0x1E, 0x00, SS_NOP|SSQ_PRINT_SENSE,
410 "Recovered id with ecc correction") },
411/* D O */{SST(0x1F, 0x00, SS_RDEF,
412 "Partial defect list transfer") },
413/* DTLPWRSOMCAE */{SST(0x20, 0x00, SS_FATAL|EINVAL,
414 "Invalid command operation code") },
415/* DT WR OM */{SST(0x21, 0x00, SS_FATAL|EINVAL,
416 "Logical block address out of range" )},
417/* DT WR OM */{SST(0x21, 0x01, SS_FATAL|EINVAL,
418 "Invalid element address") },
419/* D */{SST(0x22, 0x00, SS_FATAL|EINVAL,
420 "Illegal function") }, /* Deprecated. Use 20 00, 24 00, or 26 00 instead */
421/* DTLPWRSOMCAE */{SST(0x24, 0x00, SS_FATAL|EINVAL,
422 "Invalid field in CDB") },
423/* DTLPWRSOMCAE */{SST(0x25, 0x00, SS_FATAL|ENXIO,
424 "Logical unit not supported") },
425/* DTLPWRSOMCAE */{SST(0x26, 0x00, SS_FATAL|EINVAL,
426 "Invalid field in parameter list") },
427/* DTLPWRSOMCAE */{SST(0x26, 0x01, SS_FATAL|EINVAL,
428 "Parameter not supported") },
429/* DTLPWRSOMCAE */{SST(0x26, 0x02, SS_FATAL|EINVAL,
430 "Parameter value invalid") },
431/* DTLPWRSOMCAE */{SST(0x26, 0x03, SS_FATAL|EINVAL,
432 "Threshold parameters not supported") },
433/* DTLPWRSOMCAE */{SST(0x26, 0x04, SS_FATAL|EINVAL,
434 "Invalid release of active persistent reservation") },
435/* DT W O */{SST(0x27, 0x00, SS_FATAL|EACCES,
436 "Write protected") },
437/* DT W O */{SST(0x27, 0x01, SS_FATAL|EACCES,
438 "Hardware write protected") },
439/* DT W O */{SST(0x27, 0x02, SS_FATAL|EACCES,
440 "Logical unit software write protected") },
441/* T */{SST(0x27, 0x03, SS_FATAL|EACCES,
442 "Associated write protect") },
443/* T */{SST(0x27, 0x04, SS_FATAL|EACCES,
444 "Persistent write protect") },
445/* T */{SST(0x27, 0x05, SS_FATAL|EACCES,
446 "Permanent write protect") },
447/* DTLPWRSOMCAE */{SST(0x28, 0x00, SS_RDEF,
448 "Not ready to ready change, medium may have changed") },
449/* DTLPWRSOMCAE */{SST(0x28, 0x01, SS_FATAL|ENXIO,
450 "Import or export element accessed") },
451/*
452 * XXX JGibbs - All of these should use the same errno, but I don't think
453 * ENXIO is the correct choice. Should we borrow from the networking
454 * errnos? ECONNRESET anyone?
455 */
456/* DTLPWRSOMCAE */{SST(0x29, 0x00, SS_RDEF,
457 "Power on, reset, or bus device reset occurred") },
458/* DTLPWRSOMCAE */{SST(0x29, 0x01, SS_RDEF,
459 "Power on occurred") },
460/* DTLPWRSOMCAE */{SST(0x29, 0x02, SS_RDEF,
461 "Scsi bus reset occurred") },
462/* DTLPWRSOMCAE */{SST(0x29, 0x03, SS_RDEF,
463 "Bus device reset function occurred") },
464/* DTLPWRSOMCAE */{SST(0x29, 0x04, SS_RDEF,
465 "Device internal reset") },
466/* DTLPWRSOMCAE */{SST(0x29, 0x05, SS_RDEF,
467 "Transceiver mode changed to single-ended") },
468/* DTLPWRSOMCAE */{SST(0x29, 0x06, SS_RDEF,
469 "Transceiver mode changed to LVD") },
470/* DTL WRSOMCAE */{SST(0x2A, 0x00, SS_RDEF,
471 "Parameters changed") },
472/* DTL WRSOMCAE */{SST(0x2A, 0x01, SS_RDEF,
473 "Mode parameters changed") },
474/* DTL WRSOMCAE */{SST(0x2A, 0x02, SS_RDEF,
475 "Log parameters changed") },
476/* DTLPWRSOMCAE */{SST(0x2A, 0x03, SS_RDEF,
477 "Reservations preempted") },
478/* DTLPWRSO C */{SST(0x2B, 0x00, SS_RDEF,
479 "Copy cannot execute since host cannot disconnect") },
480/* DTLPWRSOMCAE */{SST(0x2C, 0x00, SS_RDEF,
481 "Command sequence error") },
482/* S */{SST(0x2C, 0x01, SS_RDEF,
483 "Too many windows specified") },
484/* S */{SST(0x2C, 0x02, SS_RDEF,
485 "Invalid combination of windows specified") },
486/* R */{SST(0x2C, 0x03, SS_RDEF,
487 "Current program area is not empty") },
488/* R */{SST(0x2C, 0x04, SS_RDEF,
489 "Current program area is empty") },
490/* T */{SST(0x2D, 0x00, SS_RDEF,
491 "Overwrite error on update in place") },
492/* DTLPWRSOMCAE */{SST(0x2F, 0x00, SS_RDEF,
493 "Commands cleared by another initiator") },
494/* DT WR OM */{SST(0x30, 0x00, SS_RDEF,
495 "Incompatible medium installed") },
496/* DT WR O */{SST(0x30, 0x01, SS_RDEF,
497 "Cannot read medium - unknown format") },
498/* DT WR O */{SST(0x30, 0x02, SS_RDEF,
499 "Cannot read medium - incompatible format") },
500/* DT */{SST(0x30, 0x03, SS_RDEF,
501 "Cleaning cartridge installed") },
502/* DT WR O */{SST(0x30, 0x04, SS_RDEF,
503 "Cannot write medium - unknown format") },
504/* DT WR O */{SST(0x30, 0x05, SS_RDEF,
505 "Cannot write medium - incompatible format") },
506/* DT W O */{SST(0x30, 0x06, SS_RDEF,
507 "Cannot format medium - incompatible medium") },
508/* DTL WRSOM AE */{SST(0x30, 0x07, SS_RDEF,
509 "Cleaning failure") },
510/* R */{SST(0x30, 0x08, SS_RDEF,
511 "Cannot write - application code mismatch") },
512/* R */{SST(0x30, 0x09, SS_RDEF,
513 "Current session not fixated for append") },
514/* DT WR O */{SST(0x31, 0x00, SS_RDEF,
515 "Medium format corrupted") },
516/* D L R O */{SST(0x31, 0x01, SS_RDEF,
517 "Format command failed") },
518/* D W O */{SST(0x32, 0x00, SS_RDEF,
519 "No defect spare location available") },
520/* D W O */{SST(0x32, 0x01, SS_RDEF,
521 "Defect list update failure") },
522/* T */{SST(0x33, 0x00, SS_RDEF,
523 "Tape length error") },
524/* DTLPWRSOMCAE */{SST(0x34, 0x00, SS_RDEF,
525 "Enclosure failure") },
526/* DTLPWRSOMCAE */{SST(0x35, 0x00, SS_RDEF,
527 "Enclosure services failure") },
528/* DTLPWRSOMCAE */{SST(0x35, 0x01, SS_RDEF,
529 "Unsupported enclosure function") },
530/* DTLPWRSOMCAE */{SST(0x35, 0x02, SS_RDEF,
531 "Enclosure services unavailable") },
532/* DTLPWRSOMCAE */{SST(0x35, 0x03, SS_RDEF,
533 "Enclosure services transfer failure") },
534/* DTLPWRSOMCAE */{SST(0x35, 0x04, SS_RDEF,
535 "Enclosure services transfer refused") },
536/* L */{SST(0x36, 0x00, SS_RDEF,
537 "Ribbon, ink, or toner failure") },
538/* DTL WRSOMCAE */{SST(0x37, 0x00, SS_RDEF,
539 "Rounded parameter") },
540/* DTL WRSOMCAE */{SST(0x39, 0x00, SS_RDEF,
541 "Saving parameters not supported") },
542/* DTL WRSOM */{SST(0x3A, 0x00, SS_NOP,
543 "Medium not present") },
544/* DT WR OM */{SST(0x3A, 0x01, SS_NOP,
545 "Medium not present - tray closed") },
546/* DT WR OM */{SST(0x3A, 0x02, SS_NOP,
547 "Medium not present - tray open") },
548/* DT WR OM */{SST(0x3A, 0x03, SS_NOP,
549 "Medium not present - Loadable") },
550/* DT WR OM */{SST(0x3A, 0x04, SS_NOP,
551 "Medium not present - medium auxiliary "
552 "memory accessible") },
553/* DT WR OM */{SST(0x3A, 0xFF, SS_NOP, NULL) },/* Range 0x05->0xFF */
554/* TL */{SST(0x3B, 0x00, SS_RDEF,
555 "Sequential positioning error") },
556/* T */{SST(0x3B, 0x01, SS_RDEF,
557 "Tape position error at beginning-of-medium") },
558/* T */{SST(0x3B, 0x02, SS_RDEF,
559 "Tape position error at end-of-medium") },
560/* L */{SST(0x3B, 0x03, SS_RDEF,
561 "Tape or electronic vertical forms unit not ready") },
562/* L */{SST(0x3B, 0x04, SS_RDEF,
563 "Slew failure") },
564/* L */{SST(0x3B, 0x05, SS_RDEF,
565 "Paper jam") },
566/* L */{SST(0x3B, 0x06, SS_RDEF,
567 "Failed to sense top-of-form") },
568/* L */{SST(0x3B, 0x07, SS_RDEF,
569 "Failed to sense bottom-of-form") },
570/* T */{SST(0x3B, 0x08, SS_RDEF,
571 "Reposition error") },
572/* S */{SST(0x3B, 0x09, SS_RDEF,
573 "Read past end of medium") },
574/* S */{SST(0x3B, 0x0A, SS_RDEF,
575 "Read past beginning of medium") },
576/* S */{SST(0x3B, 0x0B, SS_RDEF,
577 "Position past end of medium") },
578/* T S */{SST(0x3B, 0x0C, SS_RDEF,
579 "Position past beginning of medium") },
580/* DT WR OM */{SST(0x3B, 0x0D, SS_FATAL|ENOSPC,
581 "Medium destination element full") },
582/* DT WR OM */{SST(0x3B, 0x0E, SS_RDEF,
583 "Medium source element empty") },
584/* R */{SST(0x3B, 0x0F, SS_RDEF,
585 "End of medium reached") },
586/* DT WR OM */{SST(0x3B, 0x11, SS_RDEF,
587 "Medium magazine not accessible") },
588/* DT WR OM */{SST(0x3B, 0x12, SS_RDEF,
589 "Medium magazine removed") },
590/* DT WR OM */{SST(0x3B, 0x13, SS_RDEF,
591 "Medium magazine inserted") },
592/* DT WR OM */{SST(0x3B, 0x14, SS_RDEF,
593 "Medium magazine locked") },
594/* DT WR OM */{SST(0x3B, 0x15, SS_RDEF,
595 "Medium magazine unlocked") },
596/* DTLPWRSOMCAE */{SST(0x3D, 0x00, SS_RDEF,
597 "Invalid bits in identify message") },
598/* DTLPWRSOMCAE */{SST(0x3E, 0x00, SS_RDEF,
599 "Logical unit has not self-configured yet") },
600/* DTLPWRSOMCAE */{SST(0x3E, 0x01, SS_RDEF,
601 "Logical unit failure") },
602/* DTLPWRSOMCAE */{SST(0x3E, 0x02, SS_RDEF,
603 "Timeout on logical unit") },
604/* DTLPWRSOMCAE */{SST(0x3F, 0x00, SS_RDEF,
605 "Target operating conditions have changed") },
606/* DTLPWRSOMCAE */{SST(0x3F, 0x01, SS_RDEF,
607 "Microcode has been changed") },
608/* DTLPWRSOMC */{SST(0x3F, 0x02, SS_RDEF,
609 "Changed operating definition") },
610/* DTLPWRSOMCAE */{SST(0x3F, 0x03, SS_INQ_REFRESH|SSQ_DECREMENT_COUNT,
611 "Inquiry data has changed") },
612/* DT WR OMCAE */{SST(0x3F, 0x04, SS_RDEF,
613 "Component device attached") },
614/* DT WR OMCAE */{SST(0x3F, 0x05, SS_RDEF,
615 "Device identifier changed") },
616/* DT WR OMCAE */{SST(0x3F, 0x06, SS_RDEF,
617 "Redundancy group created or modified") },
618/* DT WR OMCAE */{SST(0x3F, 0x07, SS_RDEF,
619 "Redundancy group deleted") },
620/* DT WR OMCAE */{SST(0x3F, 0x08, SS_RDEF,
621 "Spare created or modified") },
622/* DT WR OMCAE */{SST(0x3F, 0x09, SS_RDEF,
623 "Spare deleted") },
624/* DT WR OMCAE */{SST(0x3F, 0x0A, SS_RDEF,
625 "Volume set created or modified") },
626/* DT WR OMCAE */{SST(0x3F, 0x0B, SS_RDEF,
627 "Volume set deleted") },
628/* DT WR OMCAE */{SST(0x3F, 0x0C, SS_RDEF,
629 "Volume set deassigned") },
630/* DT WR OMCAE */{SST(0x3F, 0x0D, SS_RDEF,
631 "Volume set reassigned") },
632/* DTLPWRSOMCAE */{SST(0x3F, 0x0E, SS_RDEF,
633 "Reported luns data has changed") },
634/* DTLPWRSOMCAE */{SST(0x3F, 0x0F, SS_RETRY|SSQ_DECREMENT_COUNT
635 | SSQ_DELAY_RANDOM|EBUSY,
636 "Echo buffer overwritten") },
637/* DT WR OM B*/{SST(0x3F, 0x10, SS_RDEF, "Medium Loadable") },
638/* DT WR OM B*/{SST(0x3F, 0x11, SS_RDEF,
639 "Medium auxiliary memory accessible") },
640/* D */{SST(0x40, 0x00, SS_RDEF,
641 "Ram failure") }, /* deprecated - use 40 NN instead */
642/* DTLPWRSOMCAE */{SST(0x40, 0x80, SS_RDEF,
643 "Diagnostic failure: ASCQ = Component ID") },
644/* DTLPWRSOMCAE */{SST(0x40, 0xFF, SS_RDEF|SSQ_RANGE,
645 NULL) },/* Range 0x80->0xFF */
646/* D */{SST(0x41, 0x00, SS_RDEF,
647 "Data path failure") }, /* deprecated - use 40 NN instead */
648/* D */{SST(0x42, 0x00, SS_RDEF,
649 "Power-on or self-test failure") }, /* deprecated - use 40 NN instead */
650/* DTLPWRSOMCAE */{SST(0x43, 0x00, SS_RDEF,
651 "Message error") },
652/* DTLPWRSOMCAE */{SST(0x44, 0x00, SS_RDEF,
653 "Internal target failure") },
654/* DTLPWRSOMCAE */{SST(0x45, 0x00, SS_RDEF,
655 "Select or reselect failure") },
656/* DTLPWRSOMC */{SST(0x46, 0x00, SS_RDEF,
657 "Unsuccessful soft reset") },
658/* DTLPWRSOMCAE */{SST(0x47, 0x00, SS_RDEF|SSQ_FALLBACK,
659 "SCSI parity error") },
660/* DTLPWRSOMCAE */{SST(0x47, 0x01, SS_RDEF|SSQ_FALLBACK,
661 "Data Phase CRC error detected") },
662/* DTLPWRSOMCAE */{SST(0x47, 0x02, SS_RDEF|SSQ_FALLBACK,
663 "SCSI parity error detected during ST data phase") },
664/* DTLPWRSOMCAE */{SST(0x47, 0x03, SS_RDEF|SSQ_FALLBACK,
665 "Information Unit iuCRC error") },
666/* DTLPWRSOMCAE */{SST(0x47, 0x04, SS_RDEF|SSQ_FALLBACK,
667 "Asynchronous information protection error detected") },
668/* DTLPWRSOMCAE */{SST(0x47, 0x05, SS_RDEF|SSQ_FALLBACK,
669 "Protocol server CRC error") },
670/* DTLPWRSOMCAE */{SST(0x48, 0x00, SS_RDEF|SSQ_FALLBACK,
671 "Initiator detected error message received") },
672/* DTLPWRSOMCAE */{SST(0x49, 0x00, SS_RDEF,
673 "Invalid message error") },
674/* DTLPWRSOMCAE */{SST(0x4A, 0x00, SS_RDEF,
675 "Command phase error") },
676/* DTLPWRSOMCAE */{SST(0x4B, 0x00, SS_RDEF,
677 "Data phase error") },
678/* DTLPWRSOMCAE */{SST(0x4C, 0x00, SS_RDEF,
679 "Logical unit failed self-configuration") },
680/* DTLPWRSOMCAE */{SST(0x4D, 0x00, SS_RDEF,
681 "Tagged overlapped commands: ASCQ = Queue tag ID") },
682/* DTLPWRSOMCAE */{SST(0x4D, 0xFF, SS_RDEF|SSQ_RANGE,
683 NULL)}, /* Range 0x00->0xFF */
684/* DTLPWRSOMCAE */{SST(0x4E, 0x00, SS_RDEF,
685 "Overlapped commands attempted") },
686/* T */{SST(0x50, 0x00, SS_RDEF,
687 "Write append error") },
688/* T */{SST(0x50, 0x01, SS_RDEF,
689 "Write append position error") },
690/* T */{SST(0x50, 0x02, SS_RDEF,
691 "Position error related to timing") },
692/* T O */{SST(0x51, 0x00, SS_RDEF,
693 "Erase failure") },
694/* T */{SST(0x52, 0x00, SS_RDEF,
695 "Cartridge fault") },
696/* DTL WRSOM */{SST(0x53, 0x00, SS_RDEF,
697 "Media load or eject failed") },
698/* T */{SST(0x53, 0x01, SS_RDEF,
699 "Unload tape failure") },
700/* DT WR OM */{SST(0x53, 0x02, SS_RDEF,
701 "Medium removal prevented") },
702/* P */{SST(0x54, 0x00, SS_RDEF,
703 "Scsi to host system interface failure") },
704/* P */{SST(0x55, 0x00, SS_RDEF,
705 "System resource failure") },
706/* D O */{SST(0x55, 0x01, SS_FATAL|ENOSPC,
707 "System buffer full") },
708/* R */{SST(0x57, 0x00, SS_RDEF,
709 "Unable to recover table-of-contents") },
710/* O */{SST(0x58, 0x00, SS_RDEF,
711 "Generation does not exist") },
712/* O */{SST(0x59, 0x00, SS_RDEF,
713 "Updated block read") },
714/* DTLPWRSOM */{SST(0x5A, 0x00, SS_RDEF,
715 "Operator request or state change input") },
716/* DT WR OM */{SST(0x5A, 0x01, SS_RDEF,
717 "Operator medium removal request") },
718/* DT W O */{SST(0x5A, 0x02, SS_RDEF,
719 "Operator selected write protect") },
720/* DT W O */{SST(0x5A, 0x03, SS_RDEF,
721 "Operator selected write permit") },
722/* DTLPWRSOM */{SST(0x5B, 0x00, SS_RDEF,
723 "Log exception") },
724/* DTLPWRSOM */{SST(0x5B, 0x01, SS_RDEF,
725 "Threshold condition met") },
726/* DTLPWRSOM */{SST(0x5B, 0x02, SS_RDEF,
727 "Log counter at maximum") },
728/* DTLPWRSOM */{SST(0x5B, 0x03, SS_RDEF,
729 "Log list codes exhausted") },
730/* D O */{SST(0x5C, 0x00, SS_RDEF,
731 "RPL status change") },
732/* D O */{SST(0x5C, 0x01, SS_NOP|SSQ_PRINT_SENSE,
733 "Spindles synchronized") },
734/* D O */{SST(0x5C, 0x02, SS_RDEF,
735 "Spindles not synchronized") },
736/* DTLPWRSOMCAE */{SST(0x5D, 0x00, SS_RDEF,
737 "Failure prediction threshold exceeded") },
738/* DTLPWRSOMCAE */{SST(0x5D, 0xFF, SS_RDEF,
739 "Failure prediction threshold exceeded (false)") },
740/* DTLPWRSO CA */{SST(0x5E, 0x00, SS_RDEF,
741 "Low power condition on") },
742/* DTLPWRSO CA */{SST(0x5E, 0x01, SS_RDEF,
743 "Idle condition activated by timer") },
744/* DTLPWRSO CA */{SST(0x5E, 0x02, SS_RDEF,
745 "Standby condition activated by timer") },
746/* DTLPWRSO CA */{SST(0x5E, 0x03, SS_RDEF,
747 "Idle condition activated by command") },
748/* DTLPWRSO CA */{SST(0x5E, 0x04, SS_RDEF,
749 "Standby condition activated by command") },
750/* S */{SST(0x60, 0x00, SS_RDEF,
751 "Lamp failure") },
752/* S */{SST(0x61, 0x00, SS_RDEF,
753 "Video acquisition error") },
754/* S */{SST(0x61, 0x01, SS_RDEF,
755 "Unable to acquire video") },
756/* S */{SST(0x61, 0x02, SS_RDEF,
757 "Out of focus") },
758/* S */{SST(0x62, 0x00, SS_RDEF,
759 "Scan head positioning error") },
760/* R */{SST(0x63, 0x00, SS_RDEF,
761 "End of user area encountered on this track") },
762/* R */{SST(0x63, 0x01, SS_FATAL|ENOSPC,
763 "Packet does not fit in available space") },
764/* R */{SST(0x64, 0x00, SS_RDEF,
765 "Illegal mode for this track") },
766/* R */{SST(0x64, 0x01, SS_RDEF,
767 "Invalid packet size") },
768/* DTLPWRSOMCAE */{SST(0x65, 0x00, SS_RDEF,
769 "Voltage fault") },
770/* S */{SST(0x66, 0x00, SS_RDEF,
771 "Automatic document feeder cover up") },
772/* S */{SST(0x66, 0x01, SS_RDEF,
773 "Automatic document feeder lift up") },
774/* S */{SST(0x66, 0x02, SS_RDEF,
775 "Document jam in automatic document feeder") },
776/* S */{SST(0x66, 0x03, SS_RDEF,
777 "Document miss feed automatic in document feeder") },
778/* A */{SST(0x67, 0x00, SS_RDEF,
779 "Configuration failure") },
780/* A */{SST(0x67, 0x01, SS_RDEF,
781 "Configuration of incapable logical units failed") },
782/* A */{SST(0x67, 0x02, SS_RDEF,
783 "Add logical unit failed") },
784/* A */{SST(0x67, 0x03, SS_RDEF,
785 "Modification of logical unit failed") },
786/* A */{SST(0x67, 0x04, SS_RDEF,
787 "Exchange of logical unit failed") },
788/* A */{SST(0x67, 0x05, SS_RDEF,
789 "Remove of logical unit failed") },
790/* A */{SST(0x67, 0x06, SS_RDEF,
791 "Attachment of logical unit failed") },
792/* A */{SST(0x67, 0x07, SS_RDEF,
793 "Creation of logical unit failed") },
794/* A */{SST(0x68, 0x00, SS_RDEF,
795 "Logical unit not configured") },
796/* A */{SST(0x69, 0x00, SS_RDEF,
797 "Data loss on logical unit") },
798/* A */{SST(0x69, 0x01, SS_RDEF,
799 "Multiple logical unit failures") },
800/* A */{SST(0x69, 0x02, SS_RDEF,
801 "Parity/data mismatch") },
802/* A */{SST(0x6A, 0x00, SS_RDEF,
803 "Informational, refer to log") },
804/* A */{SST(0x6B, 0x00, SS_RDEF,
805 "State change has occurred") },
806/* A */{SST(0x6B, 0x01, SS_RDEF,
807 "Redundancy level got better") },
808/* A */{SST(0x6B, 0x02, SS_RDEF,
809 "Redundancy level got worse") },
810/* A */{SST(0x6C, 0x00, SS_RDEF,
811 "Rebuild failure occurred") },
812/* A */{SST(0x6D, 0x00, SS_RDEF,
813 "Recalculate failure occurred") },
814/* A */{SST(0x6E, 0x00, SS_RDEF,
815 "Command to logical unit failed") },
816/* T */{SST(0x70, 0x00, SS_RDEF,
817 "Decompression exception short: ASCQ = Algorithm ID") },
818/* T */{SST(0x70, 0xFF, SS_RDEF|SSQ_RANGE,
819 NULL) }, /* Range 0x00 -> 0xFF */
820/* T */{SST(0x71, 0x00, SS_RDEF,
821 "Decompression exception long: ASCQ = Algorithm ID") },
822/* T */{SST(0x71, 0xFF, SS_RDEF|SSQ_RANGE,
823 NULL) }, /* Range 0x00 -> 0xFF */
824/* R */{SST(0x72, 0x00, SS_RDEF,
825 "Session fixation error") },
826/* R */{SST(0x72, 0x01, SS_RDEF,
827 "Session fixation error writing lead-in") },
828/* R */{SST(0x72, 0x02, SS_RDEF,
829 "Session fixation error writing lead-out") },
830/* R */{SST(0x72, 0x03, SS_RDEF,
831 "Session fixation error - incomplete track in session") },
832/* R */{SST(0x72, 0x04, SS_RDEF,
833 "Empty or partially written reserved track") },
834/* R */{SST(0x73, 0x00, SS_RDEF,
835 "CD control error") },
836/* R */{SST(0x73, 0x01, SS_RDEF,
837 "Power calibration area almost full") },
838/* R */{SST(0x73, 0x02, SS_FATAL|ENOSPC,
839 "Power calibration area is full") },
840/* R */{SST(0x73, 0x03, SS_RDEF,
841 "Power calibration area error") },
842/* R */{SST(0x73, 0x04, SS_RDEF,
843 "Program memory area update failure") },
844/* R */{SST(0x73, 0x05, SS_RDEF,
845 "Program memory area is full") }
846};
847
848static const int asc_table_size = sizeof(asc_table)/sizeof(asc_table[0]);
849
850struct asc_key
851{
852 int asc;
853 int ascq;
854};
855
856static int
857ascentrycomp(const void *key, const void *member)
858{
859 int asc;
860 int ascq;
861 const struct asc_table_entry *table_entry;
862
863 asc = ((const struct asc_key *)key)->asc;
864 ascq = ((const struct asc_key *)key)->ascq;
865 table_entry = (const struct asc_table_entry *)member;
866
867 if (asc >= table_entry->asc) {
868
869 if (asc > table_entry->asc)
870 return (1);
871
872 if (ascq <= table_entry->ascq) {
873 /* Check for ranges */
874 if (ascq == table_entry->ascq
875 || ((table_entry->action & SSQ_RANGE) != 0
876 && ascq >= (table_entry - 1)->ascq))
877 return (0);
878 return (-1);
879 }
880 return (1);
881 }
882 return (-1);
883}
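
The range handling above pairs an SSQ_RANGE terminator with the entry immediately before it, so a key whose ASCQ falls between the two rows still resolves to the terminator. A minimal standalone sketch of that resolution (illustrative only, not driver code; the values are copied from the 0x40 rows in the table above):

#include <stdio.h>

struct demo_entry {
	int asc;
	int ascq;
	int is_range;	/* stands in for (action & SSQ_RANGE) != 0 */
};

/* Copied from the 0x40 rows above: 0x80 base entry, 0xFF range end. */
static const struct demo_entry demo[] = {
	{ 0x40, 0x80, 0 },
	{ 0x40, 0xFF, 1 },
};

/* Same test ascentrycomp() applies when it reaches a range terminator. */
static int demo_matches(int asc, int ascq, int i)
{
	if (asc != demo[i].asc || ascq > demo[i].ascq)
		return 0;
	if (ascq == demo[i].ascq)
		return 1;
	return demo[i].is_range && i > 0 && ascq >= demo[i - 1].ascq;
}

int main(void)
{
	/* ASC/ASCQ 0x40/0x9B has no exact row but falls in 0x80->0xFF. */
	printf("0x40/0x9B handled by range row: %d\n",
	       demo_matches(0x40, 0x9B, 1));
	return 0;
}
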
884
885static int
886senseentrycomp(const void *key, const void *member)
887{
888 int sense_key;
889 const struct sense_key_table_entry *table_entry;
890
891 sense_key = *((const int *)key);
892 table_entry = (const struct sense_key_table_entry *)member;
893
894 if (sense_key >= table_entry->sense_key) {
895 if (sense_key == table_entry->sense_key)
896 return (0);
897 return (1);
898 }
899 return (-1);
900}
901
902static void
903fetchtableentries(int sense_key, int asc, int ascq,
904 struct scsi_inquiry_data *inq_data,
905 const struct sense_key_table_entry **sense_entry,
906 const struct asc_table_entry **asc_entry)
907{
908 void *match;
909 const struct asc_table_entry *asc_tables[2];
910 const struct sense_key_table_entry *sense_tables[2];
911 struct asc_key asc_ascq;
912 size_t asc_tables_size[2];
913 size_t sense_tables_size[2];
914 int num_asc_tables;
915 int num_sense_tables;
916 int i;
917
918 /* Default to failure */
919 *sense_entry = NULL;
920 *asc_entry = NULL;
921 match = NULL;
922 if (inq_data != NULL)
923 match = cam_quirkmatch((void *)inq_data,
924 (void *)sense_quirk_table,
925 sense_quirk_table_size,
926 sizeof(*sense_quirk_table),
927 aic_inquiry_match);
928
929 if (match != NULL) {
930 struct scsi_sense_quirk_entry *quirk;
931
932 quirk = (struct scsi_sense_quirk_entry *)match;
933 asc_tables[0] = quirk->asc_info;
934 asc_tables_size[0] = quirk->num_ascs;
935 asc_tables[1] = asc_table;
936 asc_tables_size[1] = asc_table_size;
937 num_asc_tables = 2;
938 sense_tables[0] = quirk->sense_key_info;
939 sense_tables_size[0] = quirk->num_sense_keys;
940 sense_tables[1] = sense_key_table;
941 sense_tables_size[1] = sense_key_table_size;
942 num_sense_tables = 2;
943 } else {
944 asc_tables[0] = asc_table;
945 asc_tables_size[0] = asc_table_size;
946 num_asc_tables = 1;
947 sense_tables[0] = sense_key_table;
948 sense_tables_size[0] = sense_key_table_size;
949 num_sense_tables = 1;
950 }
951
952 asc_ascq.asc = asc;
953 asc_ascq.ascq = ascq;
954 for (i = 0; i < num_asc_tables; i++) {
955 void *found_entry;
956
957 found_entry = scsibsearch(&asc_ascq, asc_tables[i],
958 asc_tables_size[i],
959 sizeof(**asc_tables),
960 ascentrycomp);
961
962 if (found_entry) {
963 *asc_entry = (struct asc_table_entry *)found_entry;
964 break;
965 }
966 }
967
968 for (i = 0; i < num_sense_tables; i++) {
969 void *found_entry;
970
971 found_entry = scsibsearch(&sense_key, sense_tables[i],
972 sense_tables_size[i],
973 sizeof(**sense_tables),
974 senseentrycomp);
975
976 if (found_entry) {
977 *sense_entry =
978 (struct sense_key_table_entry *)found_entry;
979 break;
980 }
981 }
982}
983
984static void *
985scsibsearch(const void *key, const void *base, size_t nmemb, size_t size,
986 int (*compar)(const void *, const void *))
987{
988 const void *entry;
989 u_int l;
990 u_int u;
991 u_int m;
992
993 l = -1;
994 u = nmemb;
995 while (l + 1 != u) {
996 m = (l + u) / 2;
997 entry = base + m * size;
998 if (compar(key, entry) > 0)
999 l = m;
1000 else
1001 u = m;
1002 }
1003
1004 entry = base + u * size;
1005 if (u == nmemb
1006 || compar(key, entry) != 0)
1007 return (NULL);
1008
1009 return ((void *)entry);
1010}
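
Note that the lower bound 'l' starts at (u_int)-1, so unsigned wraparound makes 'l + 1' equal 0 on the first pass; the loop then keeps l < target <= u until u lands on the first entry not less than the key. A standalone sketch of the same exclusive-bounds search over a plain int array (illustrative, not driver code):

#include <stdio.h>

static int cmp_int(const void *key, const void *member)
{
	int k = *(const int *)key;
	int m = *(const int *)member;

	return (k > m) - (k < m);
}

int main(void)
{
	static const int table[] = { 2, 4, 4, 8, 16 };
	unsigned int nmemb = 5;
	unsigned int l = (unsigned int)-1;	/* wraps: l + 1 == 0 */
	unsigned int u = nmemb;
	int key = 8;

	while (l + 1 != u) {
		unsigned int m = (l + u) / 2;

		if (cmp_int(&key, &table[m]) > 0)
			l = m;
		else
			u = m;
	}
	if (u == nmemb || cmp_int(&key, &table[u]) != 0)
		printf("key %d not found\n", key);
	else
		printf("key %d found at index %u\n", key, u);
	return 0;
}
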
1011
1012/*
1013 * Compare string with pattern, returning 0 on match.
1014 * Short pattern matches trailing blanks in name,
1015 * wildcard '*' in pattern matches rest of name,
1016 * wildcard '?' matches a single non-space character.
1017 */
1018static int
1019cam_strmatch(const uint8_t *str, const uint8_t *pattern, int str_len)
1020{
1021
1022 while (*pattern != '\0'&& str_len > 0) {
1023
1024 if (*pattern == '*') {
1025 return (0);
1026 }
1027 if ((*pattern != *str)
1028 && (*pattern != '?' || *str == ' ')) {
1029 return (1);
1030 }
1031 pattern++;
1032 str++;
1033 str_len--;
1034 }
1035 while (str_len > 0 && *str++ == ' ')
1036 str_len--;
1037
1038 return (str_len);
1039}
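
A few hypothetical checks of the matching rules above; since cam_strmatch() is static they are assumed to be compiled into this same file, and the vendor/product strings are made up:

static void cam_strmatch_selftest(void)
{
	/* '*' accepts the rest of the string: 0 (match) expected. */
	int m1 = cam_strmatch((const uint8_t *)"ST336607LW",
			      (const uint8_t *)"ST*", 10);
	/* Short pattern vs. a blank padded field: 0 (match) expected. */
	int m2 = cam_strmatch((const uint8_t *)"IBM     ",
			      (const uint8_t *)"IBM", 8);
	/* '?' refuses to match a space: non-zero (mismatch) expected. */
	int m3 = cam_strmatch((const uint8_t *)"A C",
			      (const uint8_t *)"A?C", 3);

	printk(KERN_DEBUG "cam_strmatch: %d %d %d\n", m1, m2, m3);
}
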
1040
1041static caddr_t
1042cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries,
1043 int entry_size, cam_quirkmatch_t *comp_func)
1044{
1045 for (; num_entries > 0; num_entries--, quirk_table += entry_size) {
1046 if ((*comp_func)(target, quirk_table) == 0)
1047 return (quirk_table);
1048 }
1049 return (NULL);
1050}
1051
1052void
1053aic_sense_desc(int sense_key, int asc, int ascq,
1054 struct scsi_inquiry_data *inq_data,
1055 const char **sense_key_desc, const char **asc_desc)
1056{
1057 const struct asc_table_entry *asc_entry;
1058 const struct sense_key_table_entry *sense_entry;
1059
1060 fetchtableentries(sense_key, asc, ascq,
1061 inq_data,
1062 &sense_entry,
1063 &asc_entry);
1064
1065 *sense_key_desc = sense_entry->desc;
1066
1067 if (asc_entry != NULL)
1068 *asc_desc = asc_entry->desc;
1069 else if (asc >= 0x80 && asc <= 0xff)
1070 *asc_desc = "Vendor Specific ASC";
1071 else if (ascq >= 0x80 && ascq <= 0xff)
1072 *asc_desc = "Vendor Specific ASCQ";
1073 else
1074 *asc_desc = "Reserved ASC/ASCQ pair";
1075}
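
A hedged usage sketch: pulling the key/ASC/ASCQ triple out of a command's sense buffer and printing the table text. The helper name is invented, and passing a NULL inquiry pointer simply skips the quirk lookup:

static void example_print_sense(struct scsi_cmnd *cmd)
{
	struct scsi_sense_data *sense;
	const char *key_desc;
	const char *asc_desc;
	int error_code, sense_key, asc, ascq;

	sense = (struct scsi_sense_data *)&cmd->sense_buffer;
	scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
	aic_sense_desc(sense_key, asc, ascq, /*inq_data*/NULL,
		       &key_desc, &asc_desc);
	printk(KERN_INFO "sense key %#x (%s), asc/ascq %#x/%#x (%s)\n",
	       sense_key, key_desc, asc, ascq, asc_desc);
}
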
1076
1077/*
1078 * Given sense and device type information, return the appropriate action.
1079 * If we do not understand the specific error as identified by the ASC/ASCQ
1080 * pair, fall back on the more generic actions derived from the sense key.
1081 */
1082aic_sense_action
1083aic_sense_error_action(struct scsi_sense_data *sense_data,
1084 struct scsi_inquiry_data *inq_data, uint32_t sense_flags)
1085{
1086 const struct asc_table_entry *asc_entry;
1087 const struct sense_key_table_entry *sense_entry;
1088 int error_code, sense_key, asc, ascq;
1089 aic_sense_action action;
1090
1091 scsi_extract_sense(sense_data, &error_code, &sense_key, &asc, &ascq);
1092
1093 if (error_code == SSD_DEFERRED_ERROR) {
1094 /*
1095 * XXX dufault@FreeBSD.org
1096 * This error doesn't relate to the command associated
1097 * with this request sense. A deferred error is an error
1098 * for a command that has already returned GOOD status
1099 * (see SCSI2 8.2.14.2).
1100 *
1101 * By my reading of that section, it looks like the current
1102 * command has been cancelled; we should now clean things up
1103 * (hopefully recovering any lost data) and then retry the
1104 * current command. There are two easy choices, both wrong:
1105 *
1106 * 1. Drop through (like we had been doing), thus treating
1107 * this as if the error were for the current command and
1108 * return and stop the current command.
1109 *
1110 * 2. Issue a retry (like I made it do) thus hopefully
1111 * recovering the current transfer, and ignoring the
1112 * fact that we've dropped a command.
1113 *
1114 * These should probably be handled in a device-specific
1115 * sense handler or punted back up to a user-mode daemon.
1116 */
1117 action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE;
1118 } else {
1119 fetchtableentries(sense_key, asc, ascq,
1120 inq_data,
1121 &sense_entry,
1122 &asc_entry);
1123
1124 /*
1125 * Override the 'No additional Sense' entry (0,0)
1126 * with the error action of the sense key.
1127 */
1128 if (asc_entry != NULL
1129 && (asc != 0 || ascq != 0))
1130 action = asc_entry->action;
1131 else
1132 action = sense_entry->action;
1133
1134 if (sense_key == SSD_KEY_RECOVERED_ERROR) {
1135 /*
1136 * The action succeeded but the device wants
1137 * the user to know that some recovery action
1138 * was required.
1139 */
1140 action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK);
1141 action |= SS_NOP|SSQ_PRINT_SENSE;
1142 } else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) {
1143 if ((sense_flags & SF_QUIET_IR) != 0)
1144 action &= ~SSQ_PRINT_SENSE;
1145 } else if (sense_key == SSD_KEY_UNIT_ATTENTION) {
1146 if ((sense_flags & SF_RETRY_UA) != 0
1147 && (action & SS_MASK) == SS_FAIL) {
1148 action &= ~(SS_MASK|SSQ_MASK);
1149 action |= SS_RETRY|SSQ_DECREMENT_COUNT|
1150 SSQ_PRINT_SENSE;
1151 }
1152 }
1153 }
1154
1155 if ((sense_flags & SF_PRINT_ALWAYS) != 0)
1156 action |= SSQ_PRINT_SENSE;
1157 else if ((sense_flags & SF_NO_PRINT) != 0)
1158 action &= ~SSQ_PRINT_SENSE;
1159
1160 return (action);
1161}
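
A sketch (the helper is invented, but it mirrors what a completion path has to do with the result) of taking the returned word apart: the low byte is an errno value, SS_MASK selects the base action, and the SSQ_* bits qualify it:

static void example_handle_action(aic_sense_action action)
{
	int error = action & SS_ERRMASK;

	switch (action & SS_MASK) {
	case SS_NOP:
		break;
	case SS_RETRY:
		/* Honour SSQ_DELAY / SSQ_DELAY_RANDOM before requeueing. */
		break;
	case SS_FAIL:
	default:
		/* Complete the command with 'error'. */
		break;
	}
	if (action & SSQ_PRINT_SENSE)
		printk(KERN_INFO "would print the sense data here\n");
	if (error)
		printk(KERN_INFO "table errno %d\n", error);
}
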
1162
1163/*
1164 * Try to make as good a match as possible with
1165 * the available sub-drivers
1166 */
1167int
1168aic_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
1169{
1170 struct scsi_inquiry_pattern *entry;
1171 struct scsi_inquiry_data *inq;
1172
1173 entry = (struct scsi_inquiry_pattern *)table_entry;
1174 inq = (struct scsi_inquiry_data *)inqbuffer;
1175
1176 if (((SID_TYPE(inq) == entry->type)
1177 || (entry->type == T_ANY))
1178 && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
1179 : entry->media_type & SIP_MEDIA_FIXED)
1180 && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
1181 && (cam_strmatch(inq->product, entry->product,
1182 sizeof(inq->product)) == 0)
1183 && (cam_strmatch(inq->revision, entry->revision,
1184 sizeof(inq->revision)) == 0)) {
1185 return (0);
1186 }
1187 return (-1);
1188}
1189
1190/*
1191 * Table of syncrates that don't follow the "divisible by 4"
1192 * rule. This table will be expanded in future SCSI specs.
1193 */
1194static struct {
1195 u_int period_factor;
1196 u_int period; /* in 100ths of ns */
1197} scsi_syncrates[] = {
1198 { 0x08, 625 }, /* FAST-160 */
1199 { 0x09, 1250 }, /* FAST-80 */
1200 { 0x0a, 2500 }, /* FAST-40 40MHz */
1201 { 0x0b, 3030 }, /* FAST-40 33MHz */
1202 { 0x0c, 5000 } /* FAST-20 */
1203};
1204
1205/*
1206 * Return the frequency in kHz corresponding to the given
1207 * sync period factor.
1208 */
1209u_int
1210aic_calc_syncsrate(u_int period_factor)
1211{
1212 int i;
1213 int num_syncrates;
1214
1215 num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
1216 /* See if the period is in the "exception" table */
1217 for (i = 0; i < num_syncrates; i++) {
1218
1219 if (period_factor == scsi_syncrates[i].period_factor) {
1220 /* Period in kHz */
1221 return (100000000 / scsi_syncrates[i].period);
1222 }
1223 }
1224
1225 /*
1226 * Wasn't in the table, so use the standard
1227 * 4 times conversion.
1228 */
1229 return (10000000 / (period_factor * 4 * 10));
1230}
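
Worked numbers, as plain standalone arithmetic: factor 0x0a sits in the exception table as 2500 hundredths of a ns (25 ns), giving 100000000 / 2500 = 40000 kHz, while a factor of 0x19 (25) is not in the table, so the generic path yields 10000000 / (25 * 4 * 10) = 10000 kHz:

#include <stdio.h>

int main(void)
{
	/* Exception table path: factor 0x0a, period 2500 (25 ns). */
	printf("factor 0x0a -> %u kHz\n", 100000000u / 2500u);
	/* Generic "times 4" path: factor 0x19 means a 100 ns period. */
	printf("factor 0x19 -> %u kHz\n", 10000000u / (0x19u * 4u * 10u));
	return 0;
}
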
1231
1232/*
1233 * Return speed in KB/s.
1234 */
1235u_int
1236aic_calc_speed(u_int width, u_int period, u_int offset, u_int min_rate)
1237{
1238 u_int freq;
1239
1240 if (offset != 0 && period < min_rate)
1241 freq = aic_calc_syncsrate(period);
1242 else
1243 /* Roughly 3.3MB/s for async */
1244 freq = 3300;
1245 freq <<= width;
1246 return (freq);
1247}
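
Worked numbers again (standalone, purely illustrative): a wide target negotiated to period factor 0x08 with a non-zero offset gets 160000 kHz from aic_calc_syncsrate(), doubled by the one-bit width shift to 320000 KB/s; with offset 0 the async estimate is 3300 << 1 = 6600 KB/s:

#include <stdio.h>

int main(void)
{
	unsigned int sync_khz = 100000000u / 625u;	/* factor 0x08 */

	printf("wide sync:  %u KB/s\n", sync_khz << 1);	/* 320000 */
	printf("wide async: %u KB/s\n", 3300u << 1);	/* 6600 */
	return 0;
}
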
1248
1249uint32_t
1250aic_error_action(struct scsi_cmnd *cmd, struct scsi_inquiry_data *inq_data,
1251 cam_status status, u_int scsi_status)
1252{
1253 aic_sense_action err_action;
1254 int sense;
1255
1256 sense = (cmd->result >> 24) == DRIVER_SENSE;
1257
1258 switch (status) {
1259 case CAM_REQ_CMP:
1260 err_action = SS_NOP;
1261 break;
1262 case CAM_AUTOSENSE_FAIL:
1263 case CAM_SCSI_STATUS_ERROR:
1264
1265 switch (scsi_status) {
1266 case SCSI_STATUS_OK:
1267 case SCSI_STATUS_COND_MET:
1268 case SCSI_STATUS_INTERMED:
1269 case SCSI_STATUS_INTERMED_COND_MET:
1270 err_action = SS_NOP;
1271 break;
1272 case SCSI_STATUS_CMD_TERMINATED:
1273 case SCSI_STATUS_CHECK_COND:
1274 if (sense != 0) {
1275 struct scsi_sense_data *sense;
1276
1277 sense = (struct scsi_sense_data *)
1278 &cmd->sense_buffer;
1279 err_action =
1280 aic_sense_error_action(sense, inq_data, 0);
1281
1282 } else {
1283 err_action = SS_RETRY|SSQ_FALLBACK
1284 | SSQ_DECREMENT_COUNT|EIO;
1285 }
1286 break;
1287 case SCSI_STATUS_QUEUE_FULL:
1288 case SCSI_STATUS_BUSY:
1289 err_action = SS_RETRY|SSQ_DELAY|SSQ_MANY
1290 | SSQ_DECREMENT_COUNT|EBUSY;
1291 break;
1292 case SCSI_STATUS_RESERV_CONFLICT:
1293 default:
1294 err_action = SS_FAIL|EBUSY;
1295 break;
1296 }
1297 break;
1298 case CAM_CMD_TIMEOUT:
1299 case CAM_REQ_CMP_ERR:
1300 case CAM_UNEXP_BUSFREE:
1301 case CAM_UNCOR_PARITY:
1302 case CAM_DATA_RUN_ERR:
1303 err_action = SS_RETRY|SSQ_FALLBACK|EIO;
1304 break;
1305 case CAM_UA_ABORT:
1306 case CAM_UA_TERMIO:
1307 case CAM_MSG_REJECT_REC:
1308 case CAM_SEL_TIMEOUT:
1309 err_action = SS_FAIL|EIO;
1310 break;
1311 case CAM_REQ_INVALID:
1312 case CAM_PATH_INVALID:
1313 case CAM_DEV_NOT_THERE:
1314 case CAM_NO_HBA:
1315 case CAM_PROVIDE_FAIL:
1316 case CAM_REQ_TOO_BIG:
1317 case CAM_RESRC_UNAVAIL:
1318 case CAM_BUSY:
1319 default:
1320 /* panic?? These should never occur in our application. */
1321 err_action = SS_FAIL|EIO;
1322 break;
1323 case CAM_SCSI_BUS_RESET:
1324 case CAM_BDR_SENT:
1325 case CAM_REQUEUE_REQ:
1326 /* Unconditional requeue */
1327 err_action = SS_RETRY;
1328 break;
1329 }
1330
1331 return (err_action);
1332}
1333
1334char *
1335aic_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
1336 aic_option_callback_t *callback, u_long callback_arg)
1337{
1338 char *tok_end;
1339 char *tok_end2;
1340 int i;
1341 int instance;
1342 int targ;
1343 int done;
1344 char tok_list[] = {'.', ',', '{', '}', '\0'};
1345 34
1346 /* All options use a ':' name/arg separator */
1347 if (*opt_arg != ':')
1348 return (opt_arg);
1349 opt_arg++;
1350 instance = -1;
1351 targ = -1;
1352 done = FALSE;
1353 /*
1354 * Restore separator that may be in
1355 * the middle of our option argument.
1356 */
1357 tok_end = strchr(opt_arg, '\0');
1358 if (tok_end < end)
1359 *tok_end = ',';
1360 while (!done) {
1361 switch (*opt_arg) {
1362 case '{':
1363 if (instance == -1) {
1364 instance = 0;
1365 } else {
1366 if (depth > 1) {
1367 if (targ == -1)
1368 targ = 0;
1369 } else {
1370 printf("Malformed Option %s\n",
1371 opt_name);
1372 done = TRUE;
1373 }
1374 }
1375 opt_arg++;
1376 break;
1377 case '}':
1378 if (targ != -1)
1379 targ = -1;
1380 else if (instance != -1)
1381 instance = -1;
1382 opt_arg++;
1383 break;
1384 case ',':
1385 case '.':
1386 if (instance == -1)
1387 done = TRUE;
1388 else if (targ >= 0)
1389 targ++;
1390 else if (instance >= 0)
1391 instance++;
1392 opt_arg++;
1393 break;
1394 case '\0':
1395 done = TRUE;
1396 break;
1397 default:
1398 tok_end = end;
1399 for (i = 0; tok_list[i]; i++) {
1400 tok_end2 = strchr(opt_arg, tok_list[i]);
1401 if ((tok_end2) && (tok_end2 < tok_end))
1402 tok_end = tok_end2;
1403 }
1404 callback(callback_arg, instance, targ,
1405 simple_strtol(opt_arg, NULL, 0));
1406 opt_arg = tok_end;
1407 break;
1408 }
1409 }
1410 return (opt_arg);
1411}
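
A hedged sketch of how this parser is driven. The callback matches aic_option_callback_t, the option text follows the tag_info:{{...}} syntax the aic7xxx/aic79xx drivers document for per-target queue depths, and the buffer must be writable because the parser temporarily patches separators back into it; the function and variable names here are invented:

static void example_tag_callback(u_long arg, int instance, int targ,
				 int32_t value)
{
	/* instance/targ are -1 outside their respective brace level. */
	printk(KERN_DEBUG "instance %d target %d depth %d\n",
	       instance, targ, value);
}

static void example_parse_tag_info(void)
{
	static char opts[] = ":{{16,16,16,16}}";  /* text after "tag_info" */
	char *end = opts + sizeof(opts) - 1;

	aic_parse_brace_option("tag_info", opts, end, /*depth*/2,
			       example_tag_callback, 0);
}
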
diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h
index bfe6f954d3c4..3bfbf0fe1ec2 100644
--- a/drivers/scsi/aic7xxx/aiclib.h
+++ b/drivers/scsi/aic7xxx/aiclib.h
@@ -57,121 +57,6 @@
57#ifndef _AICLIB_H 57#ifndef _AICLIB_H
58#define _AICLIB_H 58#define _AICLIB_H
59 59
60/*
61 * Linux Interrupt Support.
62 */
63#ifndef IRQ_RETVAL
64typedef void irqreturn_t;
65#define IRQ_RETVAL(x)
66#endif
67
68/*
69 * SCSI command format
70 */
71
72/*
73 * Define some bits that are in ALL (or a lot of) scsi commands
74 */
75#define SCSI_CTL_LINK 0x01
76#define SCSI_CTL_FLAG 0x02
77#define SCSI_CTL_VENDOR 0xC0
78#define SCSI_CMD_LUN 0xA0 /* these two should not be needed */
79#define SCSI_CMD_LUN_SHIFT 5 /* LUN in the cmd is no longer SCSI */
80
81#define SCSI_MAX_CDBLEN 16 /*
82 * 16 byte commands are in the
83 * SCSI-3 spec
84 */
85/* 6byte CDBs special case 0 length to be 256 */
86#define SCSI_CDB6_LEN(len) ((len) == 0 ? 256 : len)
87
88/*
89 * This type defines actions to be taken when a particular sense code is
90 * received. Right now, these flags are only defined to take up 16 bits,
91 * but can be expanded in the future if necessary.
92 */
93typedef enum {
94 SS_NOP = 0x000000, /* Do nothing */
95 SS_RETRY = 0x010000, /* Retry the command */
96 SS_FAIL = 0x020000, /* Bail out */
97 SS_START = 0x030000, /* Send a Start Unit command to the device,
98 * then retry the original command.
99 */
100 SS_TUR = 0x040000, /* Send a Test Unit Ready command to the
101 * device, then retry the original command.
102 */
103 SS_REQSENSE = 0x050000, /* Send a RequestSense command to the
104 * device, then retry the original command.
105 */
106 SS_INQ_REFRESH = 0x060000,
107 SS_MASK = 0xff0000
108} aic_sense_action;
109
110typedef enum {
111 SSQ_NONE = 0x0000,
112 SSQ_DECREMENT_COUNT = 0x0100, /* Decrement the retry count */
113 SSQ_MANY = 0x0200, /* send lots of recovery commands */
114 SSQ_RANGE = 0x0400, /*
115 * This table entry represents the
116 * end of a range of ASCQs that
117 * have identical error actions
118 * and text.
119 */
120 SSQ_PRINT_SENSE = 0x0800,
121 SSQ_DELAY = 0x1000, /* Delay before retry. */
122 SSQ_DELAY_RANDOM = 0x2000, /* Randomized delay before retry. */
123 SSQ_FALLBACK = 0x4000, /* Do a speed fallback to recover */
124 SSQ_MASK = 0xff00
125} aic_sense_action_qualifier;
126
127/* Mask for error status values */
128#define SS_ERRMASK 0xff
129
130/* The default retryable error action */
131#define SS_RDEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO
132
133/* The retryable error action, with table-specified error code */
134#define SS_RET SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
135
136/* Fatal error action, with table specified error code */
137#define SS_FATAL SS_FAIL|SSQ_PRINT_SENSE
138
139struct scsi_generic
140{
141 uint8_t opcode;
142 uint8_t bytes[11];
143};
144
145struct scsi_request_sense
146{
147 uint8_t opcode;
148 uint8_t byte2;
149 uint8_t unused[2];
150 uint8_t length;
151 uint8_t control;
152};
153
154struct scsi_test_unit_ready
155{
156 uint8_t opcode;
157 uint8_t byte2;
158 uint8_t unused[3];
159 uint8_t control;
160};
161
162struct scsi_send_diag
163{
164 uint8_t opcode;
165 uint8_t byte2;
166#define SSD_UOL 0x01
167#define SSD_DOL 0x02
168#define SSD_SELFTEST 0x04
169#define SSD_PF 0x10
170 uint8_t unused[1];
171 uint8_t paramlen[2];
172 uint8_t control;
173};
174
175struct scsi_sense 60struct scsi_sense
176{ 61{
177 uint8_t opcode; 62 uint8_t opcode;
@@ -181,537 +66,12 @@ struct scsi_sense
181 uint8_t control; 66 uint8_t control;
182}; 67};
183 68
184struct scsi_inquiry
185{
186 uint8_t opcode;
187 uint8_t byte2;
188#define SI_EVPD 0x01
189 uint8_t page_code;
190 uint8_t reserved;
191 uint8_t length;
192 uint8_t control;
193};
194
195struct scsi_mode_sense_6
196{
197 uint8_t opcode;
198 uint8_t byte2;
199#define SMS_DBD 0x08
200 uint8_t page;
201#define SMS_PAGE_CODE 0x3F
202#define SMS_VENDOR_SPECIFIC_PAGE 0x00
203#define SMS_DISCONNECT_RECONNECT_PAGE 0x02
204#define SMS_PERIPHERAL_DEVICE_PAGE 0x09
205#define SMS_CONTROL_MODE_PAGE 0x0A
206#define SMS_ALL_PAGES_PAGE 0x3F
207#define SMS_PAGE_CTRL_MASK 0xC0
208#define SMS_PAGE_CTRL_CURRENT 0x00
209#define SMS_PAGE_CTRL_CHANGEABLE 0x40
210#define SMS_PAGE_CTRL_DEFAULT 0x80
211#define SMS_PAGE_CTRL_SAVED 0xC0
212 uint8_t unused;
213 uint8_t length;
214 uint8_t control;
215};
216
217struct scsi_mode_sense_10
218{
219 uint8_t opcode;
220 uint8_t byte2; /* same bits as small version */
221 uint8_t page; /* same bits as small version */
222 uint8_t unused[4];
223 uint8_t length[2];
224 uint8_t control;
225};
226
227struct scsi_mode_select_6
228{
229 uint8_t opcode;
230 uint8_t byte2;
231#define SMS_SP 0x01
232#define SMS_PF 0x10
233 uint8_t unused[2];
234 uint8_t length;
235 uint8_t control;
236};
237
238struct scsi_mode_select_10
239{
240 uint8_t opcode;
241 uint8_t byte2; /* same bits as small version */
242 uint8_t unused[5];
243 uint8_t length[2];
244 uint8_t control;
245};
246
247/*
248 * When sending a mode select to a tape drive, the medium type must be 0.
249 */
250struct scsi_mode_hdr_6
251{
252 uint8_t datalen;
253 uint8_t medium_type;
254 uint8_t dev_specific;
255 uint8_t block_descr_len;
256};
257
258struct scsi_mode_hdr_10
259{
260 uint8_t datalen[2];
261 uint8_t medium_type;
262 uint8_t dev_specific;
263 uint8_t reserved[2];
264 uint8_t block_descr_len[2];
265};
266
267struct scsi_mode_block_descr
268{
269 uint8_t density_code;
270 uint8_t num_blocks[3];
271 uint8_t reserved;
272 uint8_t block_len[3];
273};
274
275struct scsi_log_sense
276{
277 uint8_t opcode;
278 uint8_t byte2;
279#define SLS_SP 0x01
280#define SLS_PPC 0x02
281 uint8_t page;
282#define SLS_PAGE_CODE 0x3F
283#define SLS_ALL_PAGES_PAGE 0x00
284#define SLS_OVERRUN_PAGE 0x01
285#define SLS_ERROR_WRITE_PAGE 0x02
286#define SLS_ERROR_READ_PAGE 0x03
287#define SLS_ERROR_READREVERSE_PAGE 0x04
288#define SLS_ERROR_VERIFY_PAGE 0x05
289#define SLS_ERROR_NONMEDIUM_PAGE 0x06
290#define SLS_ERROR_LASTN_PAGE 0x07
291#define SLS_PAGE_CTRL_MASK 0xC0
292#define SLS_PAGE_CTRL_THRESHOLD 0x00
293#define SLS_PAGE_CTRL_CUMULATIVE 0x40
294#define SLS_PAGE_CTRL_THRESH_DEFAULT 0x80
295#define SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0
296 uint8_t reserved[2];
297 uint8_t paramptr[2];
298 uint8_t length[2];
299 uint8_t control;
300};
301
302struct scsi_log_select
303{
304 uint8_t opcode;
305 uint8_t byte2;
306/* SLS_SP 0x01 */
307#define SLS_PCR 0x02
308 uint8_t page;
309/* SLS_PAGE_CTRL_MASK 0xC0 */
310/* SLS_PAGE_CTRL_THRESHOLD 0x00 */
311/* SLS_PAGE_CTRL_CUMULATIVE 0x40 */
312/* SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 */
313/* SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 */
314 uint8_t reserved[4];
315 uint8_t length[2];
316 uint8_t control;
317};
318
319struct scsi_log_header
320{
321 uint8_t page;
322 uint8_t reserved;
323 uint8_t datalen[2];
324};
325
326struct scsi_log_param_header {
327 uint8_t param_code[2];
328 uint8_t param_control;
329#define SLP_LP 0x01
330#define SLP_LBIN 0x02
331#define SLP_TMC_MASK 0x0C
332#define SLP_TMC_ALWAYS 0x00
333#define SLP_TMC_EQUAL 0x04
334#define SLP_TMC_NOTEQUAL 0x08
335#define SLP_TMC_GREATER 0x0C
336#define SLP_ETC 0x10
337#define SLP_TSD 0x20
338#define SLP_DS 0x40
339#define SLP_DU 0x80
340 uint8_t param_len;
341};
342
343struct scsi_control_page {
344 uint8_t page_code;
345 uint8_t page_length;
346 uint8_t rlec;
347#define SCB_RLEC 0x01 /*Report Log Exception Cond*/
348 uint8_t queue_flags;
349#define SCP_QUEUE_ALG_MASK 0xF0
350#define SCP_QUEUE_ALG_RESTRICTED 0x00
351#define SCP_QUEUE_ALG_UNRESTRICTED 0x10
352#define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/
353#define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/
354 uint8_t eca_and_aen;
355#define SCP_EECA 0x80 /*Enable Extended CA*/
356#define SCP_RAENP 0x04 /*Ready AEN Permission*/
357#define SCP_UAAENP 0x02 /*UA AEN Permission*/
358#define SCP_EAENP 0x01 /*Error AEN Permission*/
359 uint8_t reserved;
360 uint8_t aen_holdoff_period[2];
361};
362
363struct scsi_reserve
364{
365 uint8_t opcode;
366 uint8_t byte2;
367 uint8_t unused[2];
368 uint8_t length;
369 uint8_t control;
370};
371
372struct scsi_release
373{
374 uint8_t opcode;
375 uint8_t byte2;
376 uint8_t unused[2];
377 uint8_t length;
378 uint8_t control;
379};
380
381struct scsi_prevent
382{
383 uint8_t opcode;
384 uint8_t byte2;
385 uint8_t unused[2];
386 uint8_t how;
387 uint8_t control;
388};
389#define PR_PREVENT 0x01
390#define PR_ALLOW 0x00
391
392struct scsi_sync_cache
393{
394 uint8_t opcode;
395 uint8_t byte2;
396 uint8_t begin_lba[4];
397 uint8_t reserved;
398 uint8_t lb_count[2];
399 uint8_t control;
400};
401
402
403struct scsi_changedef
404{
405 uint8_t opcode;
406 uint8_t byte2;
407 uint8_t unused1;
408 uint8_t how;
409 uint8_t unused[4];
410 uint8_t datalen;
411 uint8_t control;
412};
413
414struct scsi_read_buffer
415{
416 uint8_t opcode;
417 uint8_t byte2;
418#define RWB_MODE 0x07
419#define RWB_MODE_HDR_DATA 0x00
420#define RWB_MODE_DATA 0x02
421#define RWB_MODE_DOWNLOAD 0x04
422#define RWB_MODE_DOWNLOAD_SAVE 0x05
423 uint8_t buffer_id;
424 uint8_t offset[3];
425 uint8_t length[3];
426 uint8_t control;
427};
428
429struct scsi_write_buffer
430{
431 uint8_t opcode;
432 uint8_t byte2;
433 uint8_t buffer_id;
434 uint8_t offset[3];
435 uint8_t length[3];
436 uint8_t control;
437};
438
439struct scsi_rw_6
440{
441 uint8_t opcode;
442 uint8_t addr[3];
443/* only 5 bits are valid in the MSB address byte */
444#define SRW_TOPADDR 0x1F
445 uint8_t length;
446 uint8_t control;
447};
448
449struct scsi_rw_10
450{
451 uint8_t opcode;
452#define SRW10_RELADDR 0x01
453#define SRW10_FUA 0x08
454#define SRW10_DPO 0x10
455 uint8_t byte2;
456 uint8_t addr[4];
457 uint8_t reserved;
458 uint8_t length[2];
459 uint8_t control;
460};
461
462struct scsi_rw_12
463{
464 uint8_t opcode;
465#define SRW12_RELADDR 0x01
466#define SRW12_FUA 0x08
467#define SRW12_DPO 0x10
468 uint8_t byte2;
469 uint8_t addr[4];
470 uint8_t length[4];
471 uint8_t reserved;
472 uint8_t control;
473};
474
475struct scsi_start_stop_unit
476{
477 uint8_t opcode;
478 uint8_t byte2;
479#define SSS_IMMED 0x01
480 uint8_t reserved[2];
481 uint8_t how;
482#define SSS_START 0x01
483#define SSS_LOEJ 0x02
484 uint8_t control;
485};
486
487#define SC_SCSI_1 0x01
488#define SC_SCSI_2 0x03
489
490/*
491 * Opcodes
492 */
493
494#define TEST_UNIT_READY 0x00
495#define REQUEST_SENSE 0x03
496#define READ_6 0x08
497#define WRITE_6 0x0a
498#define INQUIRY 0x12
499#define MODE_SELECT_6 0x15
500#define MODE_SENSE_6 0x1a
501#define START_STOP_UNIT 0x1b
502#define START_STOP 0x1b
503#define RESERVE 0x16
504#define RELEASE 0x17
505#define RECEIVE_DIAGNOSTIC 0x1c
506#define SEND_DIAGNOSTIC 0x1d
507#define PREVENT_ALLOW 0x1e
508#define READ_CAPACITY 0x25
509#define READ_10 0x28
510#define WRITE_10 0x2a
511#define POSITION_TO_ELEMENT 0x2b
512#define SYNCHRONIZE_CACHE 0x35
513#define WRITE_BUFFER 0x3b
514#define READ_BUFFER 0x3c
515#define CHANGE_DEFINITION 0x40
516#define LOG_SELECT 0x4c
517#define LOG_SENSE 0x4d
518#ifdef XXXCAM
519#define MODE_SENSE_10 0x5A
520#endif
521#define MODE_SELECT_10 0x55
522#define MOVE_MEDIUM 0xa5
523#define READ_12 0xa8
524#define WRITE_12 0xaa
525#define READ_ELEMENT_STATUS 0xb8
526
527
528/*
529 * Device Types
530 */
531#define T_DIRECT 0x00
532#define T_SEQUENTIAL 0x01
533#define T_PRINTER 0x02
534#define T_PROCESSOR 0x03
535#define T_WORM 0x04
536#define T_CDROM 0x05
537#define T_SCANNER 0x06
538#define T_OPTICAL 0x07
539#define T_CHANGER 0x08
540#define T_COMM 0x09
541#define T_ASC0 0x0a
542#define T_ASC1 0x0b
543#define T_STORARRAY 0x0c
544#define T_ENCLOSURE 0x0d
545#define T_RBC 0x0e
546#define T_OCRW 0x0f
547#define T_NODEVICE 0x1F
548#define T_ANY 0xFF /* Used in Quirk table matches */
549
550#define T_REMOV 1
551#define T_FIXED 0
552
553/*
554 * This length is the initial inquiry length used by the probe code, as
555 * well as the length necessary for aic_print_inquiry() to function
556 * correctly. If either use requires a different length in the future,
557 * the two values should be de-coupled.
558 */
559#define SHORT_INQUIRY_LENGTH 36
560
561struct scsi_inquiry_data
562{
563 uint8_t device;
564#define SID_TYPE(inq_data) ((inq_data)->device & 0x1f)
565#define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5)
566#define SID_QUAL_LU_CONNECTED 0x00 /*
567 * The specified peripheral device
568 * type is currently connected to
569 * logical unit. If the target cannot
570 * determine whether or not a physical
571 * device is currently connected, it
572 * shall also use this peripheral
573 * qualifier when returning the INQUIRY
574 * data. This peripheral qualifier
575 * does not mean that the device is
576 * ready for access by the initiator.
577 */
578#define SID_QUAL_LU_OFFLINE 0x01 /*
579 * The target is capable of supporting
580 * the specified peripheral device type
581 * on this logical unit; however, the
582 * physical device is not currently
583 * connected to this logical unit.
584 */
585#define SID_QUAL_RSVD 0x02
586#define SID_QUAL_BAD_LU 0x03 /*
587 * The target is not capable of
588 * supporting a physical device on
589 * this logical unit. For this
590 * peripheral qualifier the peripheral
591 * device type shall be set to 1Fh to
592 * provide compatibility with previous
593 * versions of SCSI. All other
594 * peripheral device type values are
595 * reserved for this peripheral
596 * qualifier.
597 */
598#define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
599 uint8_t dev_qual2;
600#define SID_QUAL2 0x7F
601#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
602 uint8_t version;
603#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
604#define SCSI_REV_0 0 69#define SCSI_REV_0 0
605#define SCSI_REV_CCS 1 70#define SCSI_REV_CCS 1
606#define SCSI_REV_2 2 71#define SCSI_REV_2 2
607#define SCSI_REV_SPC 3 72#define SCSI_REV_SPC 3
608#define SCSI_REV_SPC2 4 73#define SCSI_REV_SPC2 4
609 74
610#define SID_ECMA 0x38
611#define SID_ISO 0xC0
612 uint8_t response_format;
613#define SID_AENC 0x80
614#define SID_TrmIOP 0x40
615 uint8_t additional_length;
616 uint8_t reserved[2];
617 uint8_t flags;
618#define SID_SftRe 0x01
619#define SID_CmdQue 0x02
620#define SID_Linked 0x08
621#define SID_Sync 0x10
622#define SID_WBus16 0x20
623#define SID_WBus32 0x40
624#define SID_RelAdr 0x80
625#define SID_VENDOR_SIZE 8
626 char vendor[SID_VENDOR_SIZE];
627#define SID_PRODUCT_SIZE 16
628 char product[SID_PRODUCT_SIZE];
629#define SID_REVISION_SIZE 4
630 char revision[SID_REVISION_SIZE];
631 /*
632 * The following fields were taken from SCSI Primary Commands - 2
633 * (SPC-2) Revision 14, Dated 11 November 1999
634 */
635#define SID_VENDOR_SPECIFIC_0_SIZE 20
636 uint8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE];
637 /*
638 * An extension of SCSI Parallel Specific Values
639 */
640#define SID_SPI_IUS 0x01
641#define SID_SPI_QAS 0x02
642#define SID_SPI_CLOCK_ST 0x00
643#define SID_SPI_CLOCK_DT 0x04
644#define SID_SPI_CLOCK_DT_ST 0x0C
645#define SID_SPI_MASK 0x0F
646 uint8_t spi3data;
647 uint8_t reserved2;
648 /*
649 * Version Descriptors, stored 2 byte values.
650 */
651 uint8_t version1[2];
652 uint8_t version2[2];
653 uint8_t version3[2];
654 uint8_t version4[2];
655 uint8_t version5[2];
656 uint8_t version6[2];
657 uint8_t version7[2];
658 uint8_t version8[2];
659
660 uint8_t reserved3[22];
661
662#define SID_VENDOR_SPECIFIC_1_SIZE 160
663 uint8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE];
664};
665
666struct scsi_vpd_unit_serial_number
667{
668 uint8_t device;
669 uint8_t page_code;
670#define SVPD_UNIT_SERIAL_NUMBER 0x80
671 uint8_t reserved;
672 uint8_t length; /* serial number length */
673#define SVPD_SERIAL_NUM_SIZE 251
674 uint8_t serial_num[SVPD_SERIAL_NUM_SIZE];
675};
676
677struct scsi_read_capacity
678{
679 uint8_t opcode;
680 uint8_t byte2;
681 uint8_t addr[4];
682 uint8_t unused[3];
683 uint8_t control;
684};
685
686struct scsi_read_capacity_data
687{
688 uint8_t addr[4];
689 uint8_t length[4];
690};
691
692struct scsi_report_luns
693{
694 uint8_t opcode;
695 uint8_t byte2;
696 uint8_t unused[3];
697 uint8_t addr[4];
698 uint8_t control;
699};
700
701struct scsi_report_luns_data {
702 uint8_t length[4]; /* length of LUN inventory, in bytes */
703 uint8_t reserved[4]; /* unused */
704 /*
705 * LUN inventory- we only support the type zero form for now.
706 */
707 struct {
708 uint8_t lundata[8];
709 } luns[1];
710};
711#define RPL_LUNDATA_ATYP_MASK 0xc0 /* MBZ for type 0 lun */
712#define RPL_LUNDATA_T0LUN 1 /* @ lundata[1] */
713
714
715struct scsi_sense_data 75struct scsi_sense_data
716{ 76{
717 uint8_t error_code; 77 uint8_t error_code;
@@ -757,41 +117,6 @@ struct scsi_sense_data
757#define SSD_FULL_SIZE sizeof(struct scsi_sense_data) 117#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
758}; 118};
759 119
760struct scsi_mode_header_6
761{
762 uint8_t data_length; /* Sense data length */
763 uint8_t medium_type;
764 uint8_t dev_spec;
765 uint8_t blk_desc_len;
766};
767
768struct scsi_mode_header_10
769{
770 uint8_t data_length[2];/* Sense data length */
771 uint8_t medium_type;
772 uint8_t dev_spec;
773 uint8_t unused[2];
774 uint8_t blk_desc_len[2];
775};
776
777struct scsi_mode_page_header
778{
779 uint8_t page_code;
780 uint8_t page_length;
781};
782
783struct scsi_mode_blk_desc
784{
785 uint8_t density;
786 uint8_t nblocks[3];
787 uint8_t reserved;
788 uint8_t blklen[3];
789};
790
791#define SCSI_DEFAULT_DENSITY 0x00 /* use 'default' density */
792#define SCSI_SAME_DENSITY 0x7f /* use 'same' density- >= SCSI-2 only */
793
794
795/* 120/*
796 * Status Byte 121 * Status Byte
797 */ 122 */
@@ -807,76 +132,7 @@ struct scsi_mode_blk_desc
807#define SCSI_STATUS_ACA_ACTIVE 0x30 132#define SCSI_STATUS_ACA_ACTIVE 0x30
808#define SCSI_STATUS_TASK_ABORTED 0x40 133#define SCSI_STATUS_TASK_ABORTED 0x40
809 134
810struct scsi_inquiry_pattern {
811 uint8_t type;
812 uint8_t media_type;
813#define SIP_MEDIA_REMOVABLE 0x01
814#define SIP_MEDIA_FIXED 0x02
815 const char *vendor;
816 const char *product;
817 const char *revision;
818};
819
820struct scsi_static_inquiry_pattern {
821 uint8_t type;
822 uint8_t media_type;
823 char vendor[SID_VENDOR_SIZE+1];
824 char product[SID_PRODUCT_SIZE+1];
825 char revision[SID_REVISION_SIZE+1];
826};
827
828struct scsi_sense_quirk_entry {
829 struct scsi_inquiry_pattern inq_pat;
830 int num_sense_keys;
831 int num_ascs;
832 struct sense_key_table_entry *sense_key_info;
833 struct asc_table_entry *asc_info;
834};
835
836struct sense_key_table_entry {
837 uint8_t sense_key;
838 uint32_t action;
839 const char *desc;
840};
841
842struct asc_table_entry {
843 uint8_t asc;
844 uint8_t ascq;
845 uint32_t action;
846 const char *desc;
847};
848
849struct op_table_entry {
850 uint8_t opcode;
851 uint16_t opmask;
852 const char *desc;
853};
854
855struct scsi_op_quirk_entry {
856 struct scsi_inquiry_pattern inq_pat;
857 int num_ops;
858 struct op_table_entry *op_table;
859};
860
861typedef enum {
862 SSS_FLAG_NONE = 0x00,
863 SSS_FLAG_PRINT_COMMAND = 0x01
864} scsi_sense_string_flags;
865
866extern const char *scsi_sense_key_text[];
867
868/************************* Large Disk Handling ********************************/ 135/************************* Large Disk Handling ********************************/
869#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
870static __inline int aic_sector_div(u_long capacity, int heads, int sectors);
871
872static __inline int
873aic_sector_div(u_long capacity, int heads, int sectors)
874{
875 return (capacity / (heads * sectors));
876}
877#else
878static __inline int aic_sector_div(sector_t capacity, int heads, int sectors);
879
880static __inline int 136static __inline int
881aic_sector_div(sector_t capacity, int heads, int sectors) 137aic_sector_div(sector_t capacity, int heads, int sectors)
882{ 138{
@@ -884,152 +140,6 @@ aic_sector_div(sector_t capacity, int heads, int sectors)
884 sector_div(capacity, (heads * sectors)); 140 sector_div(capacity, (heads * sectors));
885 return (int)capacity; 141 return (int)capacity;
886} 142}
887#endif
888
889/**************************** Module Library Hack *****************************/
890/*
891 * What we'd like to do is have a single "scsi library" module that both the
892 * aic7xxx and aic79xx drivers could load and depend on. A cursory examination
893 * of implementing module dependencies in Linux (handling the install and
894 * initrd cases) does not look promising. For now, we just duplicate this
895 * code in both drivers using a simple symbol renaming scheme that hides this
896 * hack from the drivers.
897 */
898#define AIC_LIB_ENTRY_CONCAT(x, prefix) prefix ## x
899#define AIC_LIB_ENTRY_EXPAND(x, prefix) AIC_LIB_ENTRY_CONCAT(x, prefix)
900#define AIC_LIB_ENTRY(x) AIC_LIB_ENTRY_EXPAND(x, AIC_LIB_PREFIX)
901
902#define aic_sense_desc AIC_LIB_ENTRY(_sense_desc)
903#define aic_sense_error_action AIC_LIB_ENTRY(_sense_error_action)
904#define aic_error_action AIC_LIB_ENTRY(_error_action)
905#define aic_op_desc AIC_LIB_ENTRY(_op_desc)
906#define aic_cdb_string AIC_LIB_ENTRY(_cdb_string)
907#define aic_print_inquiry AIC_LIB_ENTRY(_print_inquiry)
908#define aic_calc_syncsrate AIC_LIB_ENTRY(_calc_syncrate)
909#define aic_calc_syncparam AIC_LIB_ENTRY(_calc_syncparam)
910#define aic_calc_speed AIC_LIB_ENTRY(_calc_speed)
911#define aic_inquiry_match AIC_LIB_ENTRY(_inquiry_match)
912#define aic_static_inquiry_match AIC_LIB_ENTRY(_static_inquiry_match)
913#define aic_parse_brace_option AIC_LIB_ENTRY(_parse_brace_option)
914
915/******************************************************************************/
916
917void aic_sense_desc(int /*sense_key*/, int /*asc*/,
918 int /*ascq*/, struct scsi_inquiry_data*,
919 const char** /*sense_key_desc*/,
920 const char** /*asc_desc*/);
921aic_sense_action aic_sense_error_action(struct scsi_sense_data*,
922 struct scsi_inquiry_data*,
923 uint32_t /*sense_flags*/);
924uint32_t aic_error_action(struct scsi_cmnd *,
925 struct scsi_inquiry_data *,
926 cam_status, u_int);
927
928#define SF_RETRY_UA 0x01
929#define SF_NO_PRINT 0x02
930#define SF_QUIET_IR 0x04 /* Be quiet about Illegal Request responses */
931#define SF_PRINT_ALWAYS 0x08
932
933
934const char * aic_op_desc(uint16_t /*opcode*/, struct scsi_inquiry_data*);
935char * aic_cdb_string(uint8_t* /*cdb_ptr*/, char* /*cdb_string*/,
936 size_t /*len*/);
937void aic_print_inquiry(struct scsi_inquiry_data*);
938
939u_int aic_calc_syncsrate(u_int /*period_factor*/);
940u_int aic_calc_syncparam(u_int /*period*/);
941u_int aic_calc_speed(u_int width, u_int period, u_int offset,
942 u_int min_rate);
943
944int aic_inquiry_match(caddr_t /*inqbuffer*/,
945 caddr_t /*table_entry*/);
946int aic_static_inquiry_match(caddr_t /*inqbuffer*/,
947 caddr_t /*table_entry*/);
948
949typedef void aic_option_callback_t(u_long, int, int, int32_t);
950char * aic_parse_brace_option(char *opt_name, char *opt_arg,
951 char *end, int depth,
952 aic_option_callback_t *, u_long);
953
954static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
955 int *error_code, int *sense_key,
956 int *asc, int *ascq);
957static __inline void scsi_ulto2b(uint32_t val, uint8_t *bytes);
958static __inline void scsi_ulto3b(uint32_t val, uint8_t *bytes);
959static __inline void scsi_ulto4b(uint32_t val, uint8_t *bytes);
960static __inline uint32_t scsi_2btoul(uint8_t *bytes);
961static __inline uint32_t scsi_3btoul(uint8_t *bytes);
962static __inline int32_t scsi_3btol(uint8_t *bytes);
963static __inline uint32_t scsi_4btoul(uint8_t *bytes);
964
965static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
966 int *error_code, int *sense_key,
967 int *asc, int *ascq)
968{
969 *error_code = sense->error_code & SSD_ERRCODE;
970 *sense_key = sense->flags & SSD_KEY;
971 *asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
972 *ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
973}
974
975static __inline void
976scsi_ulto2b(uint32_t val, uint8_t *bytes)
977{
978
979 bytes[0] = (val >> 8) & 0xff;
980 bytes[1] = val & 0xff;
981}
982
983static __inline void
984scsi_ulto3b(uint32_t val, uint8_t *bytes)
985{
986
987 bytes[0] = (val >> 16) & 0xff;
988 bytes[1] = (val >> 8) & 0xff;
989 bytes[2] = val & 0xff;
990}
991
992static __inline void
993scsi_ulto4b(uint32_t val, uint8_t *bytes)
994{
995
996 bytes[0] = (val >> 24) & 0xff;
997 bytes[1] = (val >> 16) & 0xff;
998 bytes[2] = (val >> 8) & 0xff;
999 bytes[3] = val & 0xff;
1000}
1001
1002static __inline uint32_t
1003scsi_2btoul(uint8_t *bytes)
1004{
1005 uint32_t rv;
1006
1007 rv = (bytes[0] << 8) |
1008 bytes[1];
1009 return (rv);
1010}
1011
1012static __inline uint32_t
1013scsi_3btoul(uint8_t *bytes)
1014{
1015 uint32_t rv;
1016
1017 rv = (bytes[0] << 16) |
1018 (bytes[1] << 8) |
1019 bytes[2];
1020 return (rv);
1021}
1022
1023static __inline int32_t
1024scsi_3btol(uint8_t *bytes)
1025{
1026 uint32_t rc = scsi_3btoul(bytes);
1027
1028 if (rc & 0x00800000)
1029 rc |= 0xff000000;
1030
1031 return (int32_t) rc;
1032}
1033 143
1034static __inline uint32_t 144static __inline uint32_t
1035scsi_4btoul(uint8_t *bytes) 145scsi_4btoul(uint8_t *bytes)
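
The scsi_ulto[234]b()/scsi_[234]btoul() helpers being dropped from this header pack and unpack big-endian CDB fields; a standalone arithmetic check (not kernel code) of the 4-byte round trip:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t b[4];
	uint32_t v = 0x00012345u;

	b[0] = (v >> 24) & 0xff;	/* what scsi_ulto4b() stores */
	b[1] = (v >> 16) & 0xff;
	b[2] = (v >> 8) & 0xff;
	b[3] = v & 0xff;
	/* scsi_4btoul() reverses it: 00 01 23 45 -> 0x12345. */
	printf("%02x %02x %02x %02x -> %#x\n", b[0], b[1], b[2], b[3],
	       (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]);
	return 0;
}
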
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fb28c1261848..deec0cef88d9 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -583,8 +583,7 @@ static void pci_enable_intx(struct pci_dev *pdev)
583#define AHCI_ENABLE (1 << 31) 583#define AHCI_ENABLE (1 << 31)
584static int piix_disable_ahci(struct pci_dev *pdev) 584static int piix_disable_ahci(struct pci_dev *pdev)
585{ 585{
586 void *mmio; 586 void __iomem *mmio;
587 unsigned long addr;
588 u32 tmp; 587 u32 tmp;
589 int rc = 0; 588 int rc = 0;
590 589
@@ -592,11 +591,11 @@ static int piix_disable_ahci(struct pci_dev *pdev)
592 * works because this device is usually set up by BIOS. 591 * works because this device is usually set up by BIOS.
593 */ 592 */
594 593
595 addr = pci_resource_start(pdev, AHCI_PCI_BAR); 594 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
596 if (!addr || !pci_resource_len(pdev, AHCI_PCI_BAR)) 595 !pci_resource_len(pdev, AHCI_PCI_BAR))
597 return 0; 596 return 0;
598 597
599 mmio = ioremap(addr, 64); 598 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
600 if (!mmio) 599 if (!mmio)
601 return -ENOMEM; 600 return -ENOMEM;
602 601
@@ -610,7 +609,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
610 rc = -EIO; 609 rc = -EIO;
611 } 610 }
612 611
613 iounmap(mmio); 612 pci_iounmap(pdev, mmio);
614 return rc; 613 return rc;
615} 614}
616 615
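
For reference, a minimal sketch of the pci_iomap()/pci_iounmap() idiom the hunk above switches to; the BAR number and register offset are arbitrary, and the __iomem cookie is only touched through readl()/writel():

static int example_clear_bit(struct pci_dev *pdev)
{
	void __iomem *mmio;
	u32 tmp;

	mmio = pci_iomap(pdev, 0, 64);	/* map at most 64 bytes of BAR 0 */
	if (!mmio)
		return -ENOMEM;

	tmp = readl(mmio + 0x04);
	writel(tmp & ~0x1, mmio + 0x04);

	pci_iounmap(pdev, mmio);
	return 0;
}
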
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 3900e28ac7d6..bd0e1b6be1ea 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -20,7 +20,6 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/completion.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/ioctl32.h>
 #include <linux/compat.h>
 #include <linux/chio.h>			/* here are all the ioctls */
@@ -31,7 +30,7 @@
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
-#include <scsi/scsi_request.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>
 
 #define CH_DT_MAX 16
@@ -181,17 +180,17 @@ static struct {
 
 /* ------------------------------------------------------------------- */
 
-static int ch_find_errno(unsigned char *sense_buffer)
+static int ch_find_errno(struct scsi_sense_hdr *sshdr)
 {
 	int i,errno = 0;
 
 	/* Check to see if additional sense information is available */
-	if (sense_buffer[7] > 5 &&
-	    sense_buffer[12] != 0) {
+	if (scsi_sense_valid(sshdr) &&
+	    sshdr->asc != 0) {
 		for (i = 0; err[i].errno != 0; i++) {
-			if (err[i].sense == sense_buffer[ 2] &&
-			    err[i].asc == sense_buffer[12] &&
-			    err[i].ascq == sense_buffer[13]) {
+			if (err[i].sense == sshdr->sense_key &&
+			    err[i].asc == sshdr->asc &&
+			    err[i].ascq == sshdr->ascq) {
 				errno = -err[i].errno;
 				break;
 			}
@@ -207,13 +206,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
 	 void *buffer, unsigned buflength,
 	 enum dma_data_direction direction)
 {
-	int errno, retries = 0, timeout;
-	struct scsi_request *sr;
+	int errno, retries = 0, timeout, result;
+	struct scsi_sense_hdr sshdr;
 
-	sr = scsi_allocate_request(ch->device, GFP_KERNEL);
-	if (NULL == sr)
-		return -ENOMEM;
-
 	timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
 		? timeout_init : timeout_move;
 
@@ -224,16 +219,17 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
 		__scsi_print_command(cmd);
 	}
 
-	scsi_wait_req(sr, cmd, buffer, buflength,
-		      timeout * HZ, MAX_RETRIES);
+	result = scsi_execute_req(ch->device, cmd, direction, buffer,
+				  buflength, &sshdr, timeout * HZ,
+				  MAX_RETRIES);
 
-	dprintk("result: 0x%x\n",sr->sr_result);
-	if (driver_byte(sr->sr_result) & DRIVER_SENSE) {
+	dprintk("result: 0x%x\n",result);
+	if (driver_byte(result) & DRIVER_SENSE) {
 		if (debug)
-			scsi_print_req_sense(ch->name, sr);
-		errno = ch_find_errno(sr->sr_sense_buffer);
+			scsi_print_sense_hdr(ch->name, &sshdr);
+		errno = ch_find_errno(&sshdr);
 
-		switch(sr->sr_sense_buffer[2] & 0xf) {
+		switch(sshdr.sense_key) {
 		case UNIT_ATTENTION:
 			ch->unit_attention = 1;
 			if (retries++ < 3)
@@ -241,7 +237,6 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
 			break;
 		}
 	}
-	scsi_release_request(sr);
 	return errno;
 }
 
@@ -940,8 +935,6 @@ static int ch_probe(struct device *dev)
 	if (init)
 		ch_init_elem(ch);
 
-	devfs_mk_cdev(MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
-		      S_IFCHR | S_IRUGO | S_IWUGO, ch->name);
 	class_device_create(ch_sysfs_class,
 			    MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
 			    dev, "s%s", ch->name);
@@ -974,7 +967,6 @@ static int ch_remove(struct device *dev)
 
 	class_device_destroy(ch_sysfs_class,
 			     MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
-	devfs_remove(ch->name);
 	kfree(ch->dt);
 	kfree(ch);
 	ch_devcount--;
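The ch.c conversion above moves from the old struct scsi_request/scsi_wait_req() interface to scsi_execute_req() with a decoded struct scsi_sense_hdr. A hedged sketch of the new calling convention; the command, timeout and retry values here are placeholders, not values from the driver.

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <linux/dma-mapping.h>

/* Issue TEST UNIT READY and look at the decoded sense data. */
static int example_tur(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
				  &sshdr, 30 * HZ, 3);
	if ((driver_byte(result) & DRIVER_SENSE) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return -EAGAIN;	/* device changed state, caller may retry */
	return result ? -EIO : 0;
}
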
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index ec161733a82b..f6be2c1c3942 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -17,6 +17,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_request.h>
 #include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
 
 
 
@@ -1155,6 +1156,31 @@ scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
 	}
 }
 
+void
+scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
+{
+	const char *sense_txt;
+	/* An example of deferred is when an earlier write to disk cache
+	 * succeeded, but now the disk discovers that it cannot write the
+	 * data to the magnetic media.
+	 */
+	const char *error = scsi_sense_is_deferred(sshdr) ?
+		"<<DEFERRED>>" : "Current";
+	printk(KERN_INFO "%s: %s", name, error);
+	if (sshdr->response_code >= 0x72)
+		printk(" [descriptor]");
+
+	sense_txt = scsi_sense_key_string(sshdr->sense_key);
+	if (sense_txt)
+		printk(": sense key: %s\n", sense_txt);
+	else
+		printk(": sense key=0x%x\n", sshdr->sense_key);
+	printk(KERN_INFO " ");
+	scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
+	printk("\n");
+}
+EXPORT_SYMBOL(scsi_print_sense_hdr);
+
 /* Print sense information */
 void
 __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
@@ -1162,8 +1188,6 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
 {
 	int k, num, res;
 	unsigned int info;
-	const char *error;
-	const char *sense_txt;
 	struct scsi_sense_hdr ssh;
 
 	res = scsi_normalize_sense(sense_buffer, sense_len, &ssh);
@@ -1181,26 +1205,7 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
 		printk("\n");
 		return;
 	}
-
-	/* An example of deferred is when an earlier write to disk cache
-	 * succeeded, but now the disk discovers that it cannot write the
-	 * data to the magnetic media.
-	 */
-	error = scsi_sense_is_deferred(&ssh) ?
-		"<<DEFERRED>>" : "Current";
-	printk(KERN_INFO "%s: %s", name, error);
-	if (ssh.response_code >= 0x72)
-		printk(" [descriptor]");
-
-	sense_txt = scsi_sense_key_string(ssh.sense_key);
-	if (sense_txt)
-		printk(": sense key: %s\n", sense_txt);
-	else
-		printk(": sense key=0x%x\n", ssh.sense_key);
-	printk(KERN_INFO " ");
-	scsi_show_extd_sense(ssh.asc, ssh.ascq);
-	printk("\n");
-
+	scsi_print_sense_hdr(name, &ssh);
 	if (ssh.response_code < 0x72) {
 		/* only decode extras for "fixed" format now */
 		char buff[80];
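The constants.c change factors the header-printing half of __scsi_print_sense() into the new exported scsi_print_sense_hdr(). A small usage sketch, assuming a caller that already has a raw sense buffer; the "example" label is just a log prefix.

#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

/* Decode a fixed- or descriptor-format sense buffer and print the summary. */
static void example_dump_sense(const unsigned char *sense, int len)
{
	struct scsi_sense_hdr sshdr;

	if (scsi_normalize_sense(sense, len, &sshdr))
		scsi_print_sense_hdr("example", &sshdr);
}
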
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5feb886c3392..85503fad789a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -52,21 +53,80 @@ static struct class shost_class = {
 };
 
 /**
- * scsi_host_cancel - cancel outstanding IO to this host
- * @shost:	pointer to struct Scsi_Host
- * recovery:	recovery requested to run.
+ * scsi_host_set_state - Take the given host through the host
+ *		state model.
+ * @shost:	scsi host to change the state of.
+ * @state:	state to change to.
+ *
+ * Returns zero if unsuccessful or an error if the requested
+ * transition is illegal.
 **/
-static void scsi_host_cancel(struct Scsi_Host *shost, int recovery)
+int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
 {
-	struct scsi_device *sdev;
+	enum scsi_host_state oldstate = shost->shost_state;
+
+	if (state == oldstate)
+		return 0;
+
+	switch (state) {
+	case SHOST_CREATED:
+		/* There are no legal states that come back to
+		 * created. This is the manually initialised start
+		 * state */
+		goto illegal;
+
+	case SHOST_RUNNING:
+		switch (oldstate) {
+		case SHOST_CREATED:
+		case SHOST_RECOVERY:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
+
+	case SHOST_RECOVERY:
+		switch (oldstate) {
+		case SHOST_RUNNING:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
+
+	case SHOST_CANCEL:
+		switch (oldstate) {
+		case SHOST_CREATED:
+		case SHOST_RUNNING:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
+
+	case SHOST_DEL:
+		switch (oldstate) {
+		case SHOST_CANCEL:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
 
-	set_bit(SHOST_CANCEL, &shost->shost_state);
-	shost_for_each_device(sdev, shost) {
-		scsi_device_cancel(sdev, recovery);
 	}
-	wait_event(shost->host_wait, (!test_bit(SHOST_RECOVERY,
-				&shost->shost_state)));
+	shost->shost_state = state;
+	return 0;
+
+ illegal:
+	SCSI_LOG_ERROR_RECOVERY(1,
+				dev_printk(KERN_ERR, &shost->shost_gendev,
+					   "Illegal host state transition"
+					   "%s->%s\n",
+					   scsi_host_state_name(oldstate),
+					   scsi_host_state_name(state)));
+	return -EINVAL;
 }
+EXPORT_SYMBOL(scsi_host_set_state);
 
 /**
  * scsi_remove_host - remove a scsi host
@@ -74,11 +134,13 @@ static void scsi_host_cancel(struct Scsi_Host *shost, int recovery)
 **/
 void scsi_remove_host(struct Scsi_Host *shost)
 {
+	down(&shost->scan_mutex);
+	scsi_host_set_state(shost, SHOST_CANCEL);
+	up(&shost->scan_mutex);
 	scsi_forget_host(shost);
-	scsi_host_cancel(shost, 0);
 	scsi_proc_host_rm(shost);
 
-	set_bit(SHOST_DEL, &shost->shost_state);
+	scsi_host_set_state(shost, SHOST_DEL);
 
 	transport_unregister_device(&shost->shost_gendev);
 	class_device_unregister(&shost->shost_classdev);
@@ -115,7 +177,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
 	if (error)
 		goto out;
 
-	set_bit(SHOST_ADD, &shost->shost_state);
+	scsi_host_set_state(shost, SHOST_RUNNING);
 	get_device(shost->shost_gendev.parent);
 
 	error = class_device_add(&shost->shost_classdev);
@@ -164,15 +226,8 @@ static void scsi_host_dev_release(struct device *dev)
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct device *parent = dev->parent;
 
-	if (shost->ehandler) {
-		DECLARE_COMPLETION(sem);
-		shost->eh_notify = &sem;
-		shost->eh_kill = 1;
-		up(shost->eh_wait);
-		wait_for_completion(&sem);
-		shost->eh_notify = NULL;
-	}
-
+	if (shost->ehandler)
+		kthread_stop(shost->ehandler);
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
 
@@ -202,7 +257,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 {
 	struct Scsi_Host *shost;
 	int gfp_mask = GFP_KERNEL, rval;
-	DECLARE_COMPLETION(complete);
 
 	if (sht->unchecked_isa_dma && privsize)
 		gfp_mask |= __GFP_DMA;
@@ -226,6 +280,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 
 	spin_lock_init(&shost->default_lock);
 	scsi_assign_lock(shost, &shost->default_lock);
+	shost->shost_state = SHOST_CREATED;
 	INIT_LIST_HEAD(&shost->__devices);
 	INIT_LIST_HEAD(&shost->__targets);
 	INIT_LIST_HEAD(&shost->eh_cmd_q);
@@ -307,12 +362,12 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d",
 		 shost->host_no);
 
-	shost->eh_notify = &complete;
-	rval = kernel_thread(scsi_error_handler, shost, 0);
-	if (rval < 0)
+	shost->ehandler = kthread_run(scsi_error_handler, shost,
+			"scsi_eh_%d", shost->host_no);
+	if (IS_ERR(shost->ehandler)) {
+		rval = PTR_ERR(shost->ehandler);
 		goto fail_destroy_freelist;
-	wait_for_completion(&complete);
-	shost->eh_notify = NULL;
+	}
 
 	scsi_proc_hostdir_add(shost->hostt);
 	return shost;
@@ -382,7 +437,7 @@ EXPORT_SYMBOL(scsi_host_lookup);
 **/
 struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 {
-	if (test_bit(SHOST_DEL, &shost->shost_state) ||
+	if ((shost->shost_state == SHOST_DEL) ||
 	    !get_device(&shost->shost_gendev))
 		return NULL;
 	return shost;
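Two midlayer idioms appear in the hosts.c hunks: the error-handler thread is now managed with the kthread API instead of kernel_thread() plus a completion handshake, and host state changes go through scsi_host_set_state(). A rough sketch of the kthread half, with a hypothetical thread function and caller names; this is not the midlayer's actual error handler.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *eh_task;

static int example_eh_thread(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);	/* stand-in for real error-handling work */
	}
	return 0;
}

static int example_start_eh(int host_no)
{
	eh_task = kthread_run(example_eh_thread, NULL, "scsi_eh_%d", host_no);
	if (IS_ERR(eh_task))
		return PTR_ERR(eh_task);
	return 0;
}

static void example_stop_eh(void)
{
	if (eh_task)
		kthread_stop(eh_task);	/* blocks until the thread exits */
}

kthread_stop() replaces the old eh_kill/eh_notify dance because it both signals the thread and waits for it, which is exactly what scsi_host_dev_release() needs.
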
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 2cb3c8340ca8..5b14934ba861 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -87,7 +87,7 @@ static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = 50;
 
-#define IBMVSCSI_VERSION "1.5.6"
+#define IBMVSCSI_VERSION "1.5.7"
 
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
@@ -145,6 +145,8 @@ static int initialize_event_pool(struct event_pool *pool,
 		    sizeof(*evt->xfer_iu) * i;
 		evt->xfer_iu = pool->iu_storage + i;
 		evt->hostdata = hostdata;
+		evt->ext_list = NULL;
+		evt->ext_list_token = 0;
 	}
 
 	return 0;
@@ -161,9 +163,16 @@ static void release_event_pool(struct event_pool *pool,
 			struct ibmvscsi_host_data *hostdata)
 {
 	int i, in_use = 0;
-	for (i = 0; i < pool->size; ++i)
+	for (i = 0; i < pool->size; ++i) {
 		if (atomic_read(&pool->events[i].free) != 1)
 			++in_use;
+		if (pool->events[i].ext_list) {
+			dma_free_coherent(hostdata->dev,
+				SG_ALL * sizeof(struct memory_descriptor),
+				pool->events[i].ext_list,
+				pool->events[i].ext_list_token);
+		}
+	}
 	if (in_use)
 		printk(KERN_WARNING
 		       "ibmvscsi: releasing event pool with %d "
@@ -286,24 +295,41 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 	} else {
 		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_out_count = numbuf;
+			srp_cmd->data_out_count =
+				numbuf < MAX_INDIRECT_BUFS ?
+					numbuf: MAX_INDIRECT_BUFS;
 		} else {
 			srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_in_count = numbuf;
+			srp_cmd->data_in_count =
+				numbuf < MAX_INDIRECT_BUFS ?
+					numbuf: MAX_INDIRECT_BUFS;
 		}
 	}
 }
 
+static void unmap_sg_list(int num_entries,
+		struct device *dev,
+		struct memory_descriptor *md)
+{
+	int i;
+
+	for (i = 0; i < num_entries; ++i) {
+		dma_unmap_single(dev,
+			md[i].virtual_address,
+			md[i].length, DMA_BIDIRECTIONAL);
+	}
+}
+
 /**
  * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
  * @cmd: srp_cmd whose additional_data member will be unmapped
  * @dev: device for which the memory is mapped
 *
 */
-static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
+static void unmap_cmd_data(struct srp_cmd *cmd,
+			   struct srp_event_struct *evt_struct,
+			   struct device *dev)
 {
-	int i;
-
 	if ((cmd->data_out_format == SRP_NO_BUFFER) &&
 	    (cmd->data_in_format == SRP_NO_BUFFER))
 		return;
@@ -318,15 +344,34 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
 		    (struct indirect_descriptor *)cmd->additional_data;
 		int num_mapped = indirect->head.length /
 			sizeof(indirect->list[0]);
-		for (i = 0; i < num_mapped; ++i) {
-			struct memory_descriptor *data = &indirect->list[i];
-			dma_unmap_single(dev,
-				data->virtual_address,
-				data->length, DMA_BIDIRECTIONAL);
+
+		if (num_mapped <= MAX_INDIRECT_BUFS) {
+			unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+			return;
 		}
+
+		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
 	}
 }
 
+static int map_sg_list(int num_entries,
+		struct scatterlist *sg,
+		struct memory_descriptor *md)
+{
+	int i;
+	u64 total_length = 0;
+
+	for (i = 0; i < num_entries; ++i) {
+		struct memory_descriptor *descr = md + i;
+		struct scatterlist *sg_entry = &sg[i];
+		descr->virtual_address = sg_dma_address(sg_entry);
+		descr->length = sg_dma_len(sg_entry);
+		descr->memory_handle = 0;
+		total_length += sg_dma_len(sg_entry);
+	}
+	return total_length;
+}
+
 /**
  * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields
  * @cmd: Scsi_Cmnd with the scatterlist
@@ -337,10 +382,11 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
  * Returns 1 on success.
 */
 static int map_sg_data(struct scsi_cmnd *cmd,
+		       struct srp_event_struct *evt_struct,
 		       struct srp_cmd *srp_cmd, struct device *dev)
 {
 
-	int i, sg_mapped;
+	int sg_mapped;
 	u64 total_length = 0;
 	struct scatterlist *sg = cmd->request_buffer;
 	struct memory_descriptor *data =
@@ -363,27 +409,46 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 		return 1;
 	}
 
-	if (sg_mapped > MAX_INDIRECT_BUFS) {
+	if (sg_mapped > SG_ALL) {
 		printk(KERN_ERR
 		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
-		       MAX_INDIRECT_BUFS, sg_mapped);
+		       SG_ALL, sg_mapped);
 		return 0;
 	}
 
 	indirect->head.virtual_address = 0;
 	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
 	indirect->head.memory_handle = 0;
-	for (i = 0; i < sg_mapped; ++i) {
-		struct memory_descriptor *descr = &indirect->list[i];
-		struct scatterlist *sg_entry = &sg[i];
-		descr->virtual_address = sg_dma_address(sg_entry);
-		descr->length = sg_dma_len(sg_entry);
-		descr->memory_handle = 0;
-		total_length += sg_dma_len(sg_entry);
+
+	if (sg_mapped <= MAX_INDIRECT_BUFS) {
+		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
+		indirect->total_length = total_length;
+		return 1;
 	}
-	indirect->total_length = total_length;
 
-	return 1;
+	/* get indirect table */
+	if (!evt_struct->ext_list) {
+		evt_struct->ext_list =(struct memory_descriptor*)
+			dma_alloc_coherent(dev,
+				SG_ALL * sizeof(struct memory_descriptor),
+				&evt_struct->ext_list_token, 0);
+		if (!evt_struct->ext_list) {
+			printk(KERN_ERR
+			       "ibmvscsi: Can't allocate memory for indirect table\n");
+			return 0;
+
+		}
+	}
+
+	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+
+	indirect->total_length = total_length;
+	indirect->head.virtual_address = evt_struct->ext_list_token;
+	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
+	memcpy(indirect->list, evt_struct->ext_list,
+		MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+
+	return 1;
 }
 
 /**
@@ -428,6 +493,7 @@ static int map_single_data(struct scsi_cmnd *cmd,
  * Returns 1 on success.
 */
 static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
+				struct srp_event_struct *evt_struct,
 				struct srp_cmd *srp_cmd, struct device *dev)
 {
 	switch (cmd->sc_data_direction) {
@@ -450,7 +516,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 	if (!cmd->request_buffer)
 		return 1;
 	if (cmd->use_sg)
-		return map_sg_data(cmd, srp_cmd, dev);
+		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
 	return map_single_data(cmd, srp_cmd, dev);
 }
 
@@ -486,6 +552,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 		printk(KERN_WARNING
 		       "ibmvscsi: Warning, request_limit exceeded\n");
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
+			       evt_struct,
 			       hostdata->dev);
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -513,7 +580,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	return 0;
 
  send_error:
-	unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
+	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
 	if ((cmnd = evt_struct->cmnd) != NULL) {
 		cmnd->result = DID_ERROR << 16;
@@ -551,6 +618,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 			       rsp->sense_and_response_data,
 			       rsp->sense_data_list_length);
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
+			       evt_struct,
 			       evt_struct->hostdata->dev);
 
 	if (rsp->doover)
@@ -583,6 +651,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 {
 	struct srp_cmd *srp_cmd;
 	struct srp_event_struct *evt_struct;
+	struct indirect_descriptor *indirect;
 	struct ibmvscsi_host_data *hostdata =
 		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
 	u16 lun = lun_from_dev(cmnd->device);
@@ -591,14 +660,6 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	if (!evt_struct)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-	init_event_struct(evt_struct,
-			  handle_cmd_rsp,
-			  VIOSRP_SRP_FORMAT,
-			  cmnd->timeout);
-
-	evt_struct->cmnd = cmnd;
-	evt_struct->cmnd_done = done;
-
 	/* Set up the actual SRP IU */
 	srp_cmd = &evt_struct->iu.srp.cmd;
 	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
@@ -606,17 +667,25 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
 	srp_cmd->lun = ((u64) lun) << 48;
 
-	if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) {
+	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
 		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	init_event_struct(evt_struct,
+			  handle_cmd_rsp,
+			  VIOSRP_SRP_FORMAT,
+			  cmnd->timeout_per_command/HZ);
+
+	evt_struct->cmnd = cmnd;
+	evt_struct->cmnd_done = done;
+
 	/* Fix up dma address of the buffer itself */
-	if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
-	    (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) {
-		struct indirect_descriptor *indirect =
-		    (struct indirect_descriptor *)srp_cmd->additional_data;
+	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
+	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
+	    (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
+	    (indirect->head.virtual_address == 0)) {
 		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
 			offsetof(struct srp_cmd, additional_data) +
 			offsetof(struct indirect_descriptor, list);
@@ -826,11 +895,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	struct srp_event_struct *tmp_evt, *found_evt;
 	union viosrp_iu srp_rsp;
 	int rsp_rc;
+	unsigned long flags;
 	u16 lun = lun_from_dev(cmd->device);
 
 	/* First, find this command in our sent list so we can figure
 	 * out the correct tag
 	 */
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	found_evt = NULL;
 	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
 		if (tmp_evt->cmnd == cmd) {
@@ -839,11 +910,14 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 		}
 	}
 
-	if (!found_evt)
+	if (!found_evt) {
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		return FAILED;
+	}
 
 	evt = get_event_struct(&hostdata->pool);
 	if (evt == NULL) {
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
 		return FAILED;
 	}
@@ -867,7 +941,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
-	if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
+	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+	if (rsp_rc != 0) {
 		printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
 		return FAILED;
 	}
@@ -901,6 +977,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	 * The event is no longer in our list. Make sure it didn't
 	 * complete while we were aborting
 	 */
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	found_evt = NULL;
 	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
 		if (tmp_evt->cmnd == cmd) {
@@ -910,6 +987,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	}
 
 	if (found_evt == NULL) {
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		printk(KERN_INFO
 		       "ibmvscsi: aborted task tag 0x%lx completed\n",
 		       tsk_mgmt->managed_task_tag);
@@ -922,8 +1000,10 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	cmd->result = (DID_ABORT << 16);
 	list_del(&found_evt->list);
-	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
+	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
+		       found_evt->hostdata->dev);
 	free_event_struct(&found_evt->hostdata->pool, found_evt);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	atomic_inc(&hostdata->request_limit);
 	return SUCCESS;
 }
@@ -943,10 +1023,13 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	struct srp_event_struct *tmp_evt, *pos;
 	union viosrp_iu srp_rsp;
 	int rsp_rc;
+	unsigned long flags;
 	u16 lun = lun_from_dev(cmd->device);
 
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	evt = get_event_struct(&hostdata->pool);
 	if (evt == NULL) {
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
 		return FAILED;
 	}
@@ -969,7 +1052,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
-	if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
+	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+	if (rsp_rc != 0) {
 		printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
 		return FAILED;
 	}
@@ -1002,12 +1087,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	/* We need to find all commands for this LUN that have not yet been
 	 * responded to, and fail them with DID_RESET
 	 */
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
 		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
 			if (tmp_evt->cmnd)
 				tmp_evt->cmnd->result = (DID_RESET << 16);
 			list_del(&tmp_evt->list);
-			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev);
+			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
+				       tmp_evt->hostdata->dev);
 			free_event_struct(&tmp_evt->hostdata->pool,
 					  tmp_evt);
 			atomic_inc(&hostdata->request_limit);
@@ -1017,6 +1104,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 				tmp_evt->done(tmp_evt);
 		}
 	}
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	return SUCCESS;
 }
 
@@ -1035,6 +1123,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
 		if (tmp_evt->cmnd) {
 			tmp_evt->cmnd->result = (DID_ERROR << 16);
 			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+				       tmp_evt,
 				       tmp_evt->hostdata->dev);
 			if (tmp_evt->cmnd_done)
 				tmp_evt->cmnd_done(tmp_evt->cmnd);
@@ -1339,7 +1428,7 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun = 16,
 	.can_queue = 1,		/* Updated after SRP_LOGIN */
 	.this_id = -1,
-	.sg_tablesize = MAX_INDIRECT_BUFS,
+	.sg_tablesize = SG_ALL,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvscsi_attrs,
 };
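The ibmvscsi changes above raise sg_tablesize to SG_ALL by giving each event an externally allocated indirect descriptor table (ext_list) obtained with dma_alloc_coherent(). A condensed, hedged sketch of the allocate/free pairing; the descriptor type and helper names are hypothetical stand-ins for the driver's memory_descriptor handling.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

struct example_desc {
	u64 virtual_address;
	u32 length;
	u64 memory_handle;
};

/* One DMA-coherent table big enough for nents descriptors; *token is the
 * bus address handed to the adapter/firmware. */
static struct example_desc *example_alloc_table(struct device *dev, int nents,
						dma_addr_t *token)
{
	return dma_alloc_coherent(dev, nents * sizeof(struct example_desc),
				  token, GFP_KERNEL);
}

static void example_free_table(struct device *dev, int nents,
			       struct example_desc *tbl, dma_addr_t token)
{
	if (tbl)
		dma_free_coherent(dev, nents * sizeof(struct example_desc),
				  tbl, token);
}
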
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 1030b703c30e..8bec0438dc8a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -68,6 +68,8 @@ struct srp_event_struct {
 	void (*cmnd_done) (struct scsi_cmnd *);
 	struct completion comp;
 	union viosrp_iu *sync_srp;
+	struct memory_descriptor *ext_list;
+	dma_addr_t ext_list_token;
 };
 
 /* a pool of event structs for use */
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index a872fad2326a..62095bdb173a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -76,6 +76,10 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc);
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
 
+int atapi_enabled = 0;
+module_param(atapi_enabled, int, 0444);
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
@@ -2613,7 +2617,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 * @ap: port to read/write
 * @buf: data buffer
 * @buflen: buffer length
- * @do_write: read/write
+ * @write_data: read/write
 *
 * Transfer data from/to the device data register by MMIO.
 *
@@ -2659,7 +2663,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
 * @ap: port to read/write
 * @buf: data buffer
 * @buflen: buffer length
- * @do_write: read/write
+ * @write_data: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
@@ -4290,6 +4294,15 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
 
 
 
+#ifdef CONFIG_PCI
+
+void ata_pci_host_stop (struct ata_host_set *host_set)
+{
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+	pci_iounmap(pdev, host_set->mmio_base);
+}
+
 /**
 * ata_pci_init_native_mode - Initialize native-mode driver
 * @pdev: pci device to be initialized
@@ -4302,7 +4315,6 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
 * ata_probe_ent structure should then be freed with kfree().
 */
 
-#ifdef CONFIG_PCI
 struct ata_probe_ent *
 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
 {
@@ -4685,6 +4697,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_host_stop);
 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
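The libata hunk above (together with the libata-scsi.c and libata.h hunks that follow) replaces the compile-time ATA_ENABLE_ATAPI switch with a run-time module parameter. The general pattern looks like the sketch below; the names here are illustrative, not part of libata.

#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0444: readable in sysfs, not writable after load. */
static int example_enabled = 0;
module_param(example_enabled, int, 0444);
MODULE_PARM_DESC(example_enabled, "Enable the optional feature (0=off, 1=on)");

Loading the module with the parameter set (for example "modprobe <module> example_enabled=1") flips the behaviour without rebuilding, which is exactly what atapi_enabled now gives libata in place of the old #ifdef.
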
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index f8ddc2a29e9a..03b7a6dd95fe 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1537,10 +1537,10 @@ ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev)
 	if (unlikely(!ata_dev_present(dev)))
 		return NULL;
 
-#ifndef ATA_ENABLE_ATAPI
-	if (unlikely(dev->class == ATA_DEV_ATAPI))
-		return NULL;
-#endif
+	if (!atapi_enabled) {
+		if (unlikely(dev->class == ATA_DEV_ATAPI))
+			return NULL;
+	}
 
 	return dev;
 }
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 809c634afbcd..d608b3a0f6fe 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -38,6 +38,7 @@ struct ata_scsi_args {
 };
 
 /* libata-core.c */
+extern int atapi_enabled;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 				      struct ata_device *dev);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3bb82aae432e..adb95674823f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -342,9 +342,6 @@ struct lpfc_hba {
 #define VPD_MASK 0xf	/* mask for any vpd data */
 
 	struct timer_list els_tmofunc;
-
-	void *link_stats;
-
 	/*
 	 * stat counters
 	 */
@@ -370,6 +367,8 @@ struct lpfc_hba {
 	struct list_head freebufList;
 	struct list_head ctrspbuflist;
 	struct list_head rnidrspbuflist;
+
+	struct fc_host_statistics link_stats;
 };
 
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3cea92883019..0e089a42c03a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
@@ -988,8 +989,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 {
 	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
 	struct lpfc_sli *psli = &phba->sli;
-	struct fc_host_statistics *hs =
-		(struct fc_host_statistics *)phba->link_stats;
+	struct fc_host_statistics *hs = &phba->link_stats;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
 	int rc=0;
@@ -1020,6 +1020,8 @@ lpfc_get_stats(struct Scsi_Host *shost)
 		return NULL;
 	}
 
+	memset(hs, 0, sizeof (struct fc_host_statistics));
+
 	hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
 	hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
 	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 78adee4699af..1280f0e54636 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -27,8 +27,10 @@
 #include <linux/interrupt.h>
 #include <linux/utsname.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2b1c9572dae7..63caf7fe9725 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 233901e9dfde..0a8269d6b130 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -24,6 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
@@ -1135,6 +1136,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 	switch(list) {
 	case NLP_NO_LIST: /* No list, just remove it */
 		lpfc_nlp_remove(phba, nlp);
+		/* as node removed - stop further transport calls */
+		rport_del = none;
 		break;
 	case NLP_UNUSED_LIST:
 		spin_lock_irq(phba->host->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 34d416d2b007..6f3cb59bf9e0 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -28,6 +28,7 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
@@ -1339,14 +1340,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
 		goto out_disable_device;
 
-	host = scsi_host_alloc(&lpfc_template,
-			sizeof (struct lpfc_hba) + sizeof (unsigned long));
+	host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
 	if (!host)
 		goto out_release_regions;
 
 	phba = (struct lpfc_hba*)host->hostdata;
 	memset(phba, 0, sizeof (struct lpfc_hba));
-	phba->link_stats = (void *)&phba[1];
 	phba->host = host;
 
 	phba->fc_flag |= FC_LOADING;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c27cf94795db..73eb89f91593 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -23,6 +23,11 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/scsi.h>
+
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
 #include "lpfc_disc.h"
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index a5cfb6421fa9..0aba13ceaacf 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -23,6 +23,11 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/scsi.h>
+
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
 #include "lpfc_disc.h"
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 45dc0210fc49..9b35eaac781d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 17e4974d4445..b5ad1871d34b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -40,11 +40,6 @@
 #define LPFC_RESET_WAIT 2
 #define LPFC_ABORT_WAIT 2
 
-static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
-{
-	fcmd->fcpLunLsl = 0;
-	fcmd->fcpLunMsl = swab16((uint16_t)lun);
-}
 
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
@@ -238,6 +233,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
 		if (datadir == DMA_TO_DEVICE)
 			bpl->tus.f.bdeFlags = 0;
+		else
+			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
 		num_bde = 1;
 		bpl++;
@@ -245,8 +242,11 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 
 	/*
 	 * Finish initializing those IOCB fields that are dependent on the
-	 * scsi_cmnd request_buffer
+	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
+	 * reinitialized since all iocb memory resources are used many times
+	 * for transmit, receive, and continuation bpl's.
 	 */
+	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
 	iocb_cmd->un.fcpi64.bdl.bdeSize +=
 		(num_bde * sizeof (struct ulp_bde64));
 	iocb_cmd->ulpBdeCount = 1;
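The hunks that follow, together with the lpfc_scsi.h change further down, replace lpfc's hand-rolled lpfc_put_lun()/FC_LUN_SHIFT encoding with the midlayer helper int_to_scsilun(), which writes the 8-byte SAM-format LUN into a struct scsi_lun. A minimal sketch of the call, assuming a hypothetical FCP command structure that embeds the struct scsi_lun; the exact header providing the declaration may differ by kernel version.

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

struct example_fcp_cmnd {
	struct scsi_lun fcp_lun;	/* 8-byte LUN, as in the new fcp_cmnd */
	/* remaining FCP_CMND fields omitted */
};

static void example_set_lun(struct example_fcp_cmnd *fcmd, unsigned int lun)
{
	int_to_scsilun(lun, &fcmd->fcp_lun);	/* byte-order handled by the helper */
}
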
@@ -445,8 +445,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
 	int datadir = scsi_cmnd->sc_data_direction;
 
 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+	/* clear task management bits */
+	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
 
-	lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+			&lpfc_cmd->fcp_cmnd->fcp_lun);
 
 	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
 
@@ -545,7 +548,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 	piocb = &piocbq->iocb;
 
 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+			&lpfc_cmd->fcp_cmnd->fcp_lun);
 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
 
 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
@@ -746,6 +750,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
 			goto out_fail_command;
 		}
+		else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+			cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+			goto out_fail_command;
+		}
 		/*
 		 * The device is most likely recovered and the driver
 		 * needs a bit more time to finish.  Ask the midlayer
751 * needs a bit more time to finish. Ask the midlayer 759 * needs a bit more time to finish. Ask the midlayer
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0fd9ba14e1b5..acd64c49e849 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -78,18 +78,7 @@ struct fcp_rsp {
78}; 78};
79 79
80struct fcp_cmnd { 80struct fcp_cmnd {
81 uint32_t fcpLunMsl; /* most significant lun word (32 bits) */ 81 struct scsi_lun fcp_lun;
82 uint32_t fcpLunLsl; /* least significant lun word (32 bits) */
83 /* # of bits to shift lun id to end up in right
84 * payload word, little endian = 8, big = 16.
85 */
86#ifdef __BIG_ENDIAN
87#define FC_LUN_SHIFT 16
88#define FC_ADDR_MODE_SHIFT 24
89#else /* __LITTLE_ENDIAN */
90#define FC_LUN_SHIFT 8
91#define FC_ADDR_MODE_SHIFT 0
92#endif
93 82
94 uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ 83 uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
95 uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ 84 uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 1775508ed276..e74e224fd77c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -24,9 +24,11 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 47dea48ee0ec..7e6747b06f90 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
 * included with this package.                                     *
 *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.0.29"
+#define LPFC_DRIVER_VERSION "8.0.30"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index ff1933298da6..a4857db4f9b8 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1766,7 +1766,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t state)
 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
 	unsigned long flags;
 
-	if (state == mdev->ofdev.dev.power.power_state || state < 2)
+	if (state.event == mdev->ofdev.dev.power.power_state.event || state.event < 2)
 		return 0;
 
 	scsi_block_requests(ms->host);
@@ -1791,7 +1791,7 @@ static int mesh_resume(struct macio_dev *mdev)
 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
 	unsigned long flags;
 
-	if (mdev->ofdev.dev.power.power_state == 0)
+	if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
 		return 0;
 
 	set_mesh_power(ms, 1);
@@ -1802,7 +1802,7 @@ static int mesh_resume(struct macio_dev *mdev)
 	enable_irq(ms->meshintr);
 	scsi_unblock_requests(ms->host);
 
-	mdev->ofdev.dev.power.power_state = 0;
+	mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
 
 	return 0;
 }
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b993652bfa25..637fb6565d28 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -996,7 +996,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
996 break; 996 break;
997 997
998 case ABORT_DEVICE: 998 case ABORT_DEVICE:
999 ha->flags.in_reset = 1;
1000 if (qla1280_verbose) 999 if (qla1280_verbose)
1001 printk(KERN_INFO 1000 printk(KERN_INFO
1002 "scsi(%ld:%d:%d:%d): Queueing abort device " 1001 "scsi(%ld:%d:%d:%d): Queueing abort device "
@@ -1010,7 +1009,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
1010 printk(KERN_INFO 1009 printk(KERN_INFO
1011 "scsi(%ld:%d:%d:%d): Queueing device reset " 1010 "scsi(%ld:%d:%d:%d): Queueing device reset "
1012 "command.\n", ha->host_no, bus, target, lun); 1011 "command.\n", ha->host_no, bus, target, lun);
1013 ha->flags.in_reset = 1;
1014 if (qla1280_device_reset(ha, bus, target) == 0) 1012 if (qla1280_device_reset(ha, bus, target) == 0)
1015 result = SUCCESS; 1013 result = SUCCESS;
1016 break; 1014 break;
@@ -1019,7 +1017,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
1019 if (qla1280_verbose) 1017 if (qla1280_verbose)
1020 printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS " 1018 printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS "
1021 "DEVICE RESET\n", ha->host_no, bus); 1019 "DEVICE RESET\n", ha->host_no, bus);
1022 ha->flags.in_reset = 1;
1023 if (qla1280_bus_reset(ha, bus == 0)) 1020 if (qla1280_bus_reset(ha, bus == 0))
1024 result = SUCCESS; 1021 result = SUCCESS;
1025 1022
@@ -1047,7 +1044,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
1047 1044
1048 if (!list_empty(&ha->done_q)) 1045 if (!list_empty(&ha->done_q))
1049 qla1280_done(ha); 1046 qla1280_done(ha);
1050 ha->flags.in_reset = 0;
1051 1047
1052 /* If we didn't manage to issue the action, or we have no 1048 /* If we didn't manage to issue the action, or we have no
1053 * command to wait for, exit here */ 1049 * command to wait for, exit here */
@@ -1269,6 +1265,22 @@ qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
1269 return qla1280_biosparam(disk->device, NULL, disk->capacity, geom); 1265 return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
1270} 1266}
1271#endif 1267#endif
1268
1269/* disable risc and host interrupts */
1270static inline void
1271qla1280_disable_intrs(struct scsi_qla_host *ha)
1272{
1273 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1274 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1275}
1276
1277/* enable risc and host interrupts */
1278static inline void
1279qla1280_enable_intrs(struct scsi_qla_host *ha)
1280{
1281 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1282 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1283}
1272 1284
1273/************************************************************************** 1285/**************************************************************************
1274 * qla1280_intr_handler 1286 * qla1280_intr_handler
@@ -1290,7 +1302,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1290 ha->isr_count++; 1302 ha->isr_count++;
1291 reg = ha->iobase; 1303 reg = ha->iobase;
1292 1304
1293 WRT_REG_WORD(&reg->ictrl, 0); /* disable our interrupt. */ 1305 qla1280_disable_intrs(ha);
1294 1306
1295 data = qla1280_debounce_register(&reg->istatus); 1307 data = qla1280_debounce_register(&reg->istatus);
1296 /* Check for pending interrupts. */ 1308 /* Check for pending interrupts. */
@@ -1303,8 +1315,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 
 	spin_unlock(HOST_LOCK);
 
-	/* enable our interrupt. */
-	WRT_REG_WORD(&reg->ictrl, (ISP_EN_INT | ISP_EN_RISC));
+	qla1280_enable_intrs(ha);
 
 	LEAVE_INTR("qla1280_intr_handler");
 	return IRQ_RETVAL(handled);
@@ -1317,7 +1328,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
 	uint8_t mr;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct nvram *nv;
-	int status;
+	int status, lun;
 
 	nv = &ha->nvram;
 
@@ -1325,24 +1336,38 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
 
 	/* Set Target Parameters. */
 	mb[0] = MBC_SET_TARGET_PARAMETERS;
-	mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
-	mb[1] <<= 8;
-
-	mb[2] = (nv->bus[bus].target[target].parameter.c << 8);
+	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
+	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
+	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
+	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
+	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
+	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
+	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
+	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
 
 	if (IS_ISP1x160(ha)) {
 		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
-		mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8) |
-			nv->bus[bus].target[target].sync_period;
+		mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
 		mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
 			nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
 		mr |= BIT_6;
 	} else {
-		mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8) |
-			nv->bus[bus].target[target].sync_period;
+		mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
 	}
+	mb[3] |= nv->bus[bus].target[target].sync_period;
 
-	status = qla1280_mailbox_command(ha, mr, &mb[0]);
+	status = qla1280_mailbox_command(ha, mr, mb);
+
+	/* Set Device Queue Parameters. */
+	for (lun = 0; lun < MAX_LUNS; lun++) {
+		mb[0] = MBC_SET_DEVICE_QUEUE;
+		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+		mb[1] |= lun;
+		mb[2] = nv->bus[bus].max_queue_depth;
+		mb[3] = nv->bus[bus].target[target].execution_throttle;
+		status |= qla1280_mailbox_command(ha, 0x0f, mb);
+	}
 
 	if (status)
 		printk(KERN_WARNING "scsi(%ld:%i:%i): "
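For readers following the mb[2] assembly above: each NVRAM flag is packed into one bit of the upper byte of the target-parameter word, reproducing the layout the old `parameter.c << 8` expression produced. A self-contained sketch of the same packing under that assumption (struct and function names here are illustrative, not driver API):

#include <stdint.h>

struct ex_tgt_param_bits {	/* mirrors the reworked nvram bitfield */
	unsigned renegotiate_on_error:1;
	unsigned stop_queue_on_check:1;
	unsigned auto_request_sense:1;
	unsigned tag_queuing:1;
	unsigned enable_sync:1;
	unsigned enable_wide:1;
	unsigned parity_checking:1;
	unsigned disconnect_allowed:1;
};

static uint16_t ex_pack_target_parameters(const struct ex_tgt_param_bits *p)
{
	uint16_t w = 0;

	w |= (uint16_t)p->renegotiate_on_error << 8;
	w |= (uint16_t)p->stop_queue_on_check  << 9;
	w |= (uint16_t)p->auto_request_sense   << 10;
	w |= (uint16_t)p->tag_queuing          << 11;
	w |= (uint16_t)p->enable_sync          << 12;
	w |= (uint16_t)p->enable_wide          << 13;
	w |= (uint16_t)p->parity_checking      << 14;
	w |= (uint16_t)p->disconnect_allowed   << 15;
	return w;	/* one flag per bit in the upper byte */
}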
@@ -1389,19 +1414,19 @@ qla1280_slave_configure(struct scsi_device *device)
 	}
 
 #if LINUX_VERSION_CODE > 0x020500
-	nv->bus[bus].target[target].parameter.f.enable_sync = device->sdtr;
-	nv->bus[bus].target[target].parameter.f.enable_wide = device->wdtr;
+	nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
+	nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
 	nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
 #endif
 
 	if (driver_setup.no_sync ||
 	    (driver_setup.sync_mask &&
 	     (~driver_setup.sync_mask & (1 << target))))
-		nv->bus[bus].target[target].parameter.f.enable_sync = 0;
+		nv->bus[bus].target[target].parameter.enable_sync = 0;
 	if (driver_setup.no_wide ||
 	    (driver_setup.wide_mask &&
 	     (~driver_setup.wide_mask & (1 << target))))
-		nv->bus[bus].target[target].parameter.f.enable_wide = 0;
+		nv->bus[bus].target[target].parameter.enable_wide = 0;
 	if (IS_ISP1x160(ha)) {
 		if (driver_setup.no_ppr ||
 		    (driver_setup.ppr_mask &&
@@ -1410,7 +1435,7 @@ qla1280_slave_configure(struct scsi_device *device)
 	}
 
 	spin_lock_irqsave(HOST_LOCK, flags);
-	if (nv->bus[bus].target[target].parameter.f.enable_sync)
+	if (nv->bus[bus].target[target].parameter.enable_sync)
 		status = qla1280_set_target_parameters(ha, bus, target);
 	qla1280_get_target_parameters(ha, device);
 	spin_unlock_irqrestore(HOST_LOCK, flags);
@@ -1448,7 +1473,6 @@ qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q)
  *
  * Input:
  *	ha = adapter block pointer.
- *	done_q = done queue.
  */
 static void
 qla1280_done(struct scsi_qla_host *ha)
@@ -1522,7 +1546,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
 	int host_status = DID_ERROR;
 	uint16_t comp_status = le16_to_cpu(sts->comp_status);
 	uint16_t state_flags = le16_to_cpu(sts->state_flags);
-	uint16_t residual_length = le16_to_cpu(sts->residual_length);
+	uint16_t residual_length = le32_to_cpu(sts->residual_length);
 	uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
 #if DEBUG_QLA1280_INTR
 	static char *reason[] = {
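The residual_length change above matters because the firmware reports the residual as a 32-bit little-endian quantity (see the __le32 change in qla1280.h later in this patch); decoding it 16 bits at a time can drop or scramble the upper half, especially on big-endian hosts. A tiny standalone illustration, using open-coded byte decoders in place of the kernel's le16_to_cpu/le32_to_cpu:

#include <stdint.h>

/* Simplified little-endian decoders; the kernel helpers compile to
 * no-ops on little-endian hosts and to byte swaps on big-endian ones. */
static uint16_t ex_le16_to_cpu(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

static uint32_t ex_le32_to_cpu(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* A residual of 0x00012000 bytes on the wire. */
static const uint8_t ex_wire_residual[4] = { 0x00, 0x20, 0x01, 0x00 };

static uint32_t ex_wrong(void) { return ex_le16_to_cpu(ex_wire_residual); } /* 0x2000 */
static uint32_t ex_right(void) { return ex_le32_to_cpu(ex_wire_residual); } /* 0x12000 */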
@@ -1582,7 +1606,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
 
 	case CS_DATA_OVERRUN:
 		dprintk(2, "Data overrun 0x%x\n", residual_length);
-		dprintk(2, "qla1280_isr: response packet data\n");
+		dprintk(2, "qla1280_return_status: response packet data\n");
 		qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
 		host_status = DID_ERROR;
 		break;
@@ -1617,40 +1641,6 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
 /* QLogic ISP1280 Hardware Support Functions. */
 /****************************************************************************/
 
- /*
-  * qla2100_enable_intrs
-  * qla2100_disable_intrs
-  *
-  * Input:
-  *	ha = adapter block pointer.
-  *
-  * Returns:
-  *	None
-  */
-static inline void
-qla1280_enable_intrs(struct scsi_qla_host *ha)
-{
-	struct device_reg __iomem *reg;
-
-	reg = ha->iobase;
-	/* enable risc and host interrupts */
-	WRT_REG_WORD(&reg->ictrl, (ISP_EN_INT | ISP_EN_RISC));
-	RD_REG_WORD(&reg->ictrl);	/* PCI Posted Write flush */
-	ha->flags.ints_enabled = 1;
-}
-
-static inline void
-qla1280_disable_intrs(struct scsi_qla_host *ha)
-{
-	struct device_reg __iomem *reg;
-
-	reg = ha->iobase;
-	/* disable risc and host interrupts */
-	WRT_REG_WORD(&reg->ictrl, 0);
-	RD_REG_WORD(&reg->ictrl);	/* PCI Posted Write flush */
-	ha->flags.ints_enabled = 0;
-}
-
 /*
  * qla1280_initialize_adapter
  *	Initialize board.
@@ -1679,7 +1669,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
 	ha->flags.reset_active = 0;
 	ha->flags.abort_isp_active = 0;
 
-	ha->flags.ints_enabled = 0;
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 	if (ia64_platform_is("sn2")) {
 		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
@@ -1758,69 +1747,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
 	return status;
 }
 
-
-/*
- * ISP Firmware Test
- *	Checks if present version of RISC firmware is older than
- *	driver firmware.
- *
- * Input:
- *	ha = adapter block pointer.
- *
- * Returns:
- *	0 = firmware does not need to be loaded.
- */
-static int
-qla1280_isp_firmware(struct scsi_qla_host *ha)
-{
-	struct nvram *nv = (struct nvram *) ha->response_ring;
-	int status = 0;		/* dg 2/27 always loads RISC */
-	uint16_t mb[MAILBOX_REGISTER_COUNT];
-
-	ENTER("qla1280_isp_firmware");
-
-	dprintk(1, "scsi(%li): Determining if RISC is loaded\n", ha->host_no);
-
-	/* Bad NVRAM data, load RISC code. */
-	if (!ha->nvram_valid) {
-		ha->flags.disable_risc_code_load = 0;
-	} else
-		ha->flags.disable_risc_code_load =
-			nv->cntr_flags_1.disable_loading_risc_code;
-
-	if (ha->flags.disable_risc_code_load) {
-		dprintk(3, "qla1280_isp_firmware: Telling RISC to verify "
-			"checksum of loaded BIOS code.\n");
-
-		/* Verify checksum of loaded RISC code. */
-		mb[0] = MBC_VERIFY_CHECKSUM;
-		/* mb[1] = ql12_risc_code_addr01; */
-		mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
-
-		if (!(status =
-		      qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]))) {
-			/* Start firmware execution. */
-			dprintk(3, "qla1280_isp_firmware: Startng F/W "
-				"execution.\n");
-
-			mb[0] = MBC_EXECUTE_FIRMWARE;
-			/* mb[1] = ql12_risc_code_addr01; */
-			mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
-			qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
-		} else
-			printk(KERN_INFO "qla1280: RISC checksum failed.\n");
-	} else {
-		dprintk(1, "qla1280: NVRAM configured to load RISC load.\n");
-		status = 1;
-	}
-
-	if (status)
-		dprintk(2, "qla1280_isp_firmware: **** Load RISC code ****\n");
-
-	LEAVE("qla1280_isp_firmware");
-	return status;
-}
-
 /*
  * Chip diagnostics
  *	Test chip for proper operation.
@@ -2006,7 +1932,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
2006 "%d,%d(0x%x)\n", 1932 "%d,%d(0x%x)\n",
2007 risc_code_address, cnt, num, risc_address); 1933 risc_code_address, cnt, num, risc_address);
2008 for(i = 0; i < cnt; i++) 1934 for(i = 0; i < cnt; i++)
2009 ((uint16_t *)ha->request_ring)[i] = 1935 ((__le16 *)ha->request_ring)[i] =
2010 cpu_to_le16(risc_code_address[i]); 1936 cpu_to_le16(risc_code_address[i]);
2011 1937
2012 mb[0] = MBC_LOAD_RAM; 1938 mb[0] = MBC_LOAD_RAM;
@@ -2085,7 +2011,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
 	mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
 	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 	if (err) {
-		printk(KERN_ERR "scsi(%li): Failed checksum\n", ha->host_no);
+		printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
 		return err;
 	}
 
@@ -2105,14 +2031,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
 static int
 qla1280_load_firmware(struct scsi_qla_host *ha)
 {
-	int err = -ENODEV;
-
-	/* If firmware needs to be loaded */
-	if (!qla1280_isp_firmware(ha)) {
-		printk(KERN_ERR "scsi(%li): isp_firmware() failed!\n",
-				ha->host_no);
-		goto out;
-	}
+	int err;
 
 	err = qla1280_chip_diag(ha);
 	if (err)
@@ -2246,17 +2165,17 @@ qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
 {
 	struct nvram *nv = &ha->nvram;
 
-	nv->bus[bus].target[target].parameter.f.renegotiate_on_error = 1;
-	nv->bus[bus].target[target].parameter.f.auto_request_sense = 1;
-	nv->bus[bus].target[target].parameter.f.tag_queuing = 1;
-	nv->bus[bus].target[target].parameter.f.enable_sync = 1;
+	nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
+	nv->bus[bus].target[target].parameter.auto_request_sense = 1;
+	nv->bus[bus].target[target].parameter.tag_queuing = 1;
+	nv->bus[bus].target[target].parameter.enable_sync = 1;
 #if 1	/* Some SCSI Processors do not seem to like this */
-	nv->bus[bus].target[target].parameter.f.enable_wide = 1;
+	nv->bus[bus].target[target].parameter.enable_wide = 1;
 #endif
-	nv->bus[bus].target[target].parameter.f.parity_checking = 1;
-	nv->bus[bus].target[target].parameter.f.disconnect_allowed = 1;
 	nv->bus[bus].target[target].execution_throttle =
 		nv->bus[bus].max_queue_depth - 1;
+	nv->bus[bus].target[target].parameter.parity_checking = 1;
+	nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
 
 	if (IS_ISP1x160(ha)) {
 		nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
@@ -2284,9 +2203,9 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
 	/* nv->cntr_flags_1.disable_loading_risc_code = 1; */
 	nv->firmware_feature.f.enable_fast_posting = 1;
 	nv->firmware_feature.f.disable_synchronous_backoff = 1;
-	nv->termination.f.scsi_bus_0_control = 3;
-	nv->termination.f.scsi_bus_1_control = 3;
-	nv->termination.f.auto_term_support = 1;
+	nv->termination.scsi_bus_0_control = 3;
+	nv->termination.scsi_bus_1_control = 3;
+	nv->termination.auto_term_support = 1;
 
 	/*
 	 * Set default FIFO magic - What appropriate values would be here
@@ -2296,7 +2215,12 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
 	 * header file provided by QLogic seems to be bogus or incomplete
 	 * at best.
 	 */
-	nv->isp_config.c = ISP_CFG1_BENAB|ISP_CFG1_F128;
+	nv->isp_config.burst_enable = 1;
+	if (IS_ISP1040(ha))
+		nv->isp_config.fifo_threshold |= 3;
+	else
+		nv->isp_config.fifo_threshold |= 4;
+
 	if (IS_ISP1x160(ha))
 		nv->isp_parameter = 0x01; /* fast memory enable */
 
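The defaults written above replace the old single magic byte with named bitfields; per the reworked struct nvram later in the patch, burst_enable sits in bit 2 and fifo_threshold in bits 7-4 of NVRAM byte 16. A small sketch of how such defaults map back onto a raw byte, assuming the usual little-endian bitfield ordering (all names here are illustrative):

#include <stdint.h>

struct ex_isp_config_bits {	/* layout follows the reworked struct nvram */
	unsigned reserved:2;
	unsigned burst_enable:1;
	unsigned reserved_1:1;
	unsigned fifo_threshold:4;
};

static uint8_t ex_isp_config_to_byte(struct ex_isp_config_bits c)
{
	/* burst enable in bit 2, FIFO threshold in bits 7-4 */
	return (uint8_t)(c.burst_enable << 2 | c.fifo_threshold << 4);
}

int main(void)
{
	struct ex_isp_config_bits c = { .burst_enable = 1, .fifo_threshold = 4 };

	return ex_isp_config_to_byte(c) == 0x44 ? 0 : 1;
}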
@@ -2327,66 +2251,53 @@ qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
 	struct nvram *nv = &ha->nvram;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	int status, lun;
+	uint16_t flag;
 
 	/* Set Target Parameters. */
 	mb[0] = MBC_SET_TARGET_PARAMETERS;
-	mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
-	mb[1] <<= 8;
-
-	/*
-	 * Do not enable wide, sync, and ppr for the initial
-	 * INQUIRY run. We enable this later if we determine
-	 * the target actually supports it.
-	 */
-	nv->bus[bus].target[target].parameter.f.
-		auto_request_sense = 1;
-	nv->bus[bus].target[target].parameter.f.
-		stop_queue_on_check = 0;
-
-	if (IS_ISP1x160(ha))
-		nv->bus[bus].target[target].ppr_1x160.
-			flags.enable_ppr = 0;
+	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
 
 	/*
-	 * No sync, wide, etc. while probing
+	 * Do not enable sync and ppr for the initial INQUIRY run. We
+	 * enable this later if we determine the target actually
+	 * supports it.
 	 */
-	mb[2] = (nv->bus[bus].target[target].parameter.c << 8) &
-		~(TP_SYNC /*| TP_WIDE | TP_PPR*/);
+	mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
+		 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
 
 	if (IS_ISP1x160(ha))
 		mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
 	else
 		mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
 	mb[3] |= nv->bus[bus].target[target].sync_period;
-
-	status = qla1280_mailbox_command(ha, BIT_3 | BIT_2 | BIT_1 | BIT_0, &mb[0]);
+	status = qla1280_mailbox_command(ha, 0x0f, mb);
 
 	/* Save Tag queuing enable flag. */
-	mb[0] = BIT_0 << target;
-	if (nv->bus[bus].target[target].parameter.f.tag_queuing)
-		ha->bus_settings[bus].qtag_enables |= mb[0];
+	flag = (BIT_0 << target) & mb[0];
+	if (nv->bus[bus].target[target].parameter.tag_queuing)
+		ha->bus_settings[bus].qtag_enables |= flag;
 
 	/* Save Device enable flag. */
 	if (IS_ISP1x160(ha)) {
 		if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
-			ha->bus_settings[bus].device_enables |= mb[0];
+			ha->bus_settings[bus].device_enables |= flag;
 		ha->bus_settings[bus].lun_disables |= 0;
 	} else {
 		if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
-			ha->bus_settings[bus].device_enables |= mb[0];
+			ha->bus_settings[bus].device_enables |= flag;
 		/* Save LUN disable flag. */
 		if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
-			ha->bus_settings[bus].lun_disables |= mb[0];
+			ha->bus_settings[bus].lun_disables |= flag;
 	}
 
 	/* Set Device Queue Parameters. */
 	for (lun = 0; lun < MAX_LUNS; lun++) {
 		mb[0] = MBC_SET_DEVICE_QUEUE;
-		mb[1] = (uint16_t)(bus ? target | BIT_7 : target);
-		mb[1] = mb[1] << 8 | lun;
+		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+		mb[1] |= lun;
 		mb[2] = nv->bus[bus].max_queue_depth;
 		mb[3] = nv->bus[bus].target[target].execution_throttle;
-		status |= qla1280_mailbox_command(ha, 0x0f, &mb[0]);
+		status |= qla1280_mailbox_command(ha, 0x0f, mb);
 	}
 
 	return status;
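In the Set Device Queue loop above, mb[1] carries the target in its high byte (with BIT_7 selecting the second bus) and the LUN in its low byte, while mb[2]/mb[3] hold the queue depth and execution throttle. A compact standalone sketch of just that packing (helper and constant names are hypothetical, not driver API):

#include <stdint.h>

#define EX_BIT_7	0x80	/* stand-in for the driver's BIT_7 */

struct ex_dev_queue_mbox {
	uint16_t mb1;	/* target/bus in bits 15-8, lun in bits 7-0 */
	uint16_t mb2;	/* maximum queue depth */
	uint16_t mb3;	/* execution throttle */
};

static struct ex_dev_queue_mbox
ex_pack_device_queue(int bus, int target, int lun, int depth, int throttle)
{
	struct ex_dev_queue_mbox m;

	m.mb1 = (uint16_t)((bus ? (target | EX_BIT_7) : target) << 8);
	m.mb1 |= (uint16_t)lun;
	m.mb2 = (uint16_t)depth;
	m.mb3 = (uint16_t)throttle;
	return m;
}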
@@ -2431,7 +2342,6 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
 	struct nvram *nv = &ha->nvram;
 	int bus, target, status = 0;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
-	uint16_t mask;
 
 	ENTER("qla1280_nvram_config");
 
@@ -2439,7 +2349,7 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
 		/* Always force AUTO sense for LINUX SCSI */
 		for (bus = 0; bus < MAX_BUSES; bus++)
 			for (target = 0; target < MAX_TARGETS; target++) {
-				nv->bus[bus].target[target].parameter.f.
+				nv->bus[bus].target[target].parameter.
 					auto_request_sense = 1;
 			}
 	} else {
@@ -2457,31 +2367,40 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
 
 		hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
 
-		cfg1 = RD_REG_WORD(&reg->cfg_1);
+		cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
 		cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
 		ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
 
 		/* Busted fifo, says mjacob. */
-		if (hwrev == ISP_CFG0_1040A)
-			WRT_REG_WORD(&reg->cfg_1, cfg1 | ISP_CFG1_F64);
-		else
-			WRT_REG_WORD(&reg->cfg_1, cfg1 | ISP_CFG1_F64 | ISP_CFG1_BENAB);
+		if (hwrev != ISP_CFG0_1040A)
+			cfg1 |= nv->isp_config.fifo_threshold << 4;
+
+		cfg1 |= nv->isp_config.burst_enable << 2;
+		WRT_REG_WORD(&reg->cfg_1, cfg1);
 
 		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
 		WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
 	} else {
+		uint16_t cfg1, term;
+
 		/* Set ISP hardware DMA burst */
-		mb[0] = nv->isp_config.c;
+		cfg1 = nv->isp_config.fifo_threshold << 4;
+		cfg1 |= nv->isp_config.burst_enable << 2;
 		/* Enable DMA arbitration on dual channel controllers */
 		if (ha->ports > 1)
-			mb[0] |= BIT_13;
-		WRT_REG_WORD(&reg->cfg_1, mb[0]);
+			cfg1 |= BIT_13;
+		WRT_REG_WORD(&reg->cfg_1, cfg1);
 
 		/* Set SCSI termination. */
-		WRT_REG_WORD(&reg->gpio_enable, (BIT_3 + BIT_2 + BIT_1 + BIT_0));
-		mb[0] = nv->termination.c & (BIT_3 + BIT_2 + BIT_1 + BIT_0);
-		WRT_REG_WORD(&reg->gpio_data, mb[0]);
+		WRT_REG_WORD(&reg->gpio_enable,
+			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		term = nv->termination.scsi_bus_1_control;
+		term |= nv->termination.scsi_bus_0_control << 2;
+		term |= nv->termination.auto_term_support << 7;
+		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
+		WRT_REG_WORD(&reg->gpio_data, term);
 	}
+	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
 
 	/* ISP parameter word. */
 	mb[0] = MBC_SET_SYSTEM_PARAMETER;
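Two things worth noting in the hunk above: cfg_1 is now read, has its FIFO-threshold bits (6-4) masked out, and is rewritten with the NVRAM values OR'd back in, and the GPIO termination byte is built from the named NVRAM fields instead of the old raw `termination.c`. A self-contained sketch of the termination packing only (register access elided, all names illustrative):

#include <stdint.h>

struct ex_termination {
	unsigned scsi_bus_1_control:2;	/* 0 = off, 1 = high only, 3 = auto */
	unsigned scsi_bus_0_control:2;
	unsigned auto_term_support:1;
};

static uint16_t ex_pack_termination(struct ex_termination t)
{
	uint16_t term = 0;

	term |= t.scsi_bus_1_control;		/* bits 1-0 */
	term |= t.scsi_bus_0_control << 2;	/* bits 3-2 */
	term |= t.auto_term_support << 7;	/* bit 7, matching gpio_enable BIT_7 */
	return term;
}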
@@ -2497,16 +2416,17 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
 
 	/* Firmware feature word. */
 	mb[0] = MBC_SET_FIRMWARE_FEATURES;
-	mask = BIT_5 | BIT_1 | BIT_0;
-	mb[1] = le16_to_cpu(nv->firmware_feature.w) & (mask);
+	mb[1] = nv->firmware_feature.f.enable_fast_posting;
+	mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
+	mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
 	if (ia64_platform_is("sn2")) {
 		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
 		       "workaround\n", ha->host_no);
-		mb[1] |= BIT_9;
+		mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
 	}
 #endif
-	status |= qla1280_mailbox_command(ha, mask, &mb[0]);
+	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 
 	/* Retry count and delay. */
 	mb[0] = MBC_SET_RETRY_COUNT;
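The firmware-feature word is now assembled from named NVRAM bits rather than masking the raw halfword: fast posting in bit 0, LVD bus-transition reporting in bit 1, and synchronous-backoff disable in bit 5, which matches the old BIT_5 | BIT_1 | BIT_0 mask. A short standalone sketch of that packing (the struct is a stand-in for the nvram firmware_feature flags):

#include <stdint.h>

struct ex_fw_feature {
	unsigned enable_fast_posting:1;
	unsigned report_lvd_bus_transition:1;
	unsigned disable_synchronous_backoff:1;
};

static uint16_t ex_pack_fw_features(struct ex_fw_feature f)
{
	uint16_t mb1 = 0;

	mb1 |= f.enable_fast_posting;			/* bit 0 */
	mb1 |= f.report_lvd_bus_transition << 1;	/* bit 1 */
	mb1 |= f.disable_synchronous_backoff << 5;	/* bit 5 */
	return mb1;
}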
@@ -2535,27 +2455,27 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
 		mb[2] |= BIT_5;
 	if (nv->bus[1].config_2.data_line_active_negation)
 		mb[2] |= BIT_4;
-	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
 
 	mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
 	mb[1] = 2;	/* Reset SCSI bus and return all outstanding IO */
-	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 
 	/* thingy */
 	mb[0] = MBC_SET_PCI_CONTROL;
-	mb[1] = 2;	/* Data DMA Channel Burst Enable */
-	mb[2] = 2;	/* Command DMA Channel Burst Enable */
-	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+	mb[1] = BIT_1;	/* Data DMA Channel Burst Enable */
+	mb[2] = BIT_1;	/* Command DMA Channel Burst Enable */
+	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
 
 	mb[0] = MBC_SET_TAG_AGE_LIMIT;
 	mb[1] = 8;
-	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 
 	/* Selection timeout. */
 	mb[0] = MBC_SET_SELECTION_TIMEOUT;
 	mb[1] = nv->bus[0].selection_timeout;
 	mb[2] = nv->bus[1].selection_timeout;
-	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
 
 	for (bus = 0; bus < ha->ports; bus++)
 		status |= qla1280_config_bus(ha, bus);
@@ -3066,7 +2986,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct scsi_cmnd *cmd = sp->cmd;
 	cmd_a64_entry_t *pkt;
 	struct scatterlist *sg = NULL;
-	u32 *dword_ptr;
+	__le32 *dword_ptr;
 	dma_addr_t dma_handle;
 	int status = 0;
 	int cnt;
@@ -3104,10 +3024,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
 	}
 
+	dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
+		ha->req_q_cnt, seg_cnt);
+
 	/* If room for request in request ring. */
 	if ((req_cnt + 2) >= ha->req_q_cnt) {
 		status = 1;
-		dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x req_q_cnt="
+		dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
 			"0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
 			req_cnt);
 		goto out;
@@ -3119,7 +3042,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 
 	if (cnt >= MAX_OUTSTANDING_COMMANDS) {
 		status = 1;
-		dprintk(2, "qla1280_64bit_start_scsi: NO ROOM IN "
+		dprintk(2, "qla1280_start_scsi: NO ROOM IN "
 			"OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
 		goto out;
 	}
@@ -3128,7 +3051,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	ha->req_q_cnt -= req_cnt;
 	CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
 
-	dprintk(2, "64bit_start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
+	dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
 		cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
 	dprintk(2, " bus %i, target %i, lun %i\n",
 		SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
@@ -3350,7 +3273,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct scsi_cmnd *cmd = sp->cmd;
 	struct cmd_entry *pkt;
 	struct scatterlist *sg = NULL;
-	uint32_t *dword_ptr;
+	__le32 *dword_ptr;
 	int status = 0;
 	int cnt;
 	int req_cnt;
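Declaring dword_ptr as __le32 * in both start_scsi paths lets sparse check that every value stored into the request-ring data segments passes through cpu_to_le32() first. A minimal illustration of the underlying idea outside the kernel, with an open-coded little-endian store in place of cpu_to_le32() (the layout and offsets are illustrative only):

#include <stdint.h>

/* Open-coded little-endian store; cpu_to_le32() plays this role in the
 * driver, where the destination is a __le32 ring-entry member. */
static void ex_put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = (uint8_t)v;
	dst[1] = (uint8_t)(v >> 8);
	dst[2] = (uint8_t)(v >> 16);
	dst[3] = (uint8_t)(v >> 24);
}

/* Fill one hypothetical data-segment descriptor: address, then length. */
static void ex_fill_dseg(uint8_t *dseg, uint32_t dma_addr, uint32_t len)
{
	ex_put_le32(dseg + 0, dma_addr);
	ex_put_le32(dseg + 4, len);
}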
@@ -3993,21 +3916,21 @@ qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha)
 	result = cmd->request_buffer;
 	n = &ha->nvram;
 
-	n->bus[bus].target[target].parameter.f.enable_wide = 0;
-	n->bus[bus].target[target].parameter.f.enable_sync = 0;
+	n->bus[bus].target[target].parameter.enable_wide = 0;
+	n->bus[bus].target[target].parameter.enable_sync = 0;
 	n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
 
 	if (result[7] & 0x60)
-		n->bus[bus].target[target].parameter.f.enable_wide = 1;
+		n->bus[bus].target[target].parameter.enable_wide = 1;
 	if (result[7] & 0x10)
-		n->bus[bus].target[target].parameter.f.enable_sync = 1;
+		n->bus[bus].target[target].parameter.enable_sync = 1;
 	if ((result[2] >= 3) && (result[4] + 5 > 56) &&
 	    (result[56] & 0x4))
 		n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
 
 	dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n",
-		n->bus[bus].target[target].parameter.f.enable_wide,
-		n->bus[bus].target[target].parameter.f.enable_sync,
+		n->bus[bus].target[target].parameter.enable_wide,
+		n->bus[bus].target[target].parameter.enable_sync,
 		n->bus[bus].target[target].ppr_1x160.flags.enable_ppr);
 }
 #endif
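The checks above read standard INQUIRY data: byte 7 bits 5-6 advertise wide transfers, bit 4 advertises synchronous transfers, and, when the response is long enough, byte 56 bit 2 indicates DT clocking suitable for Ultra160/PPR negotiation. A standalone sketch of the same decoding under those assumptions (struct and function names are hypothetical):

#include <stdint.h>
#include <stdbool.h>

struct ex_tgt_caps {
	bool wide;
	bool sync;
	bool ppr;
};

static struct ex_tgt_caps ex_parse_inquiry(const uint8_t *inq, int len)
{
	struct ex_tgt_caps caps = { 0 };

	caps.wide = (inq[7] & 0x60) != 0;		/* WBUS16/WBUS32 bits */
	caps.sync = (inq[7] & 0x10) != 0;		/* SYNC bit */
	if (inq[2] >= 3 && inq[4] + 5 > 56 && len > 56)
		caps.ppr = (inq[56] & 0x04) != 0;	/* clocking bits */
	return caps;
}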
@@ -4071,7 +3994,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
 	/* Save ISP completion status */
 	CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
 
-	if (scsi_status & SS_CHECK_CONDITION) {
+	if (scsi_status & SAM_STAT_CHECK_CONDITION) {
 		if (comp_status != CS_ARS_FAILED) {
 			uint16_t req_sense_length =
 				le16_to_cpu(pkt->req_sense_length);
@@ -4650,7 +4573,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
 		if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
 			printk(KERN_WARNING "scsi(%li): Unable to set a "
-			       " suitable DMA mask - aboring\n", ha->host_no);
+			       "suitable DMA mask - aborting\n", ha->host_no);
 			error = -ENODEV;
 			goto error_free_irq;
 		}
@@ -4660,14 +4583,14 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 #else
 	if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
 		printk(KERN_WARNING "scsi(%li): Unable to set a "
-		       " suitable DMA mask - aboring\n", ha->host_no);
+		       "suitable DMA mask - aborting\n", ha->host_no);
 		error = -ENODEV;
 		goto error_free_irq;
 	}
 #endif
 
 	ha->request_ring = pci_alloc_consistent(ha->pdev,
-			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
+			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
 			&ha->request_dma);
 	if (!ha->request_ring) {
 		printk(KERN_INFO "qla1280: Failed to get request memory\n");
@@ -4675,7 +4598,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	ha->response_ring = pci_alloc_consistent(ha->pdev,
-			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
+			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
 			&ha->response_dma);
 	if (!ha->response_ring) {
 		printk(KERN_INFO "qla1280: Failed to get response memory\n");
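The ring allocations above pair each pci_alloc_consistent() with a pci_free_consistent() in the error path further down, using the identical size expression. A minimal sketch of that pairing with a hypothetical ring wrapper (the pci_* calls are the contemporary 2.6 API; everything else is illustrative):

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical wrapper around one DMA-consistent ring. */
struct ex_ring {
	void		*virt;
	dma_addr_t	dma;
};

static int ex_alloc_ring(struct pci_dev *pdev, struct ex_ring *r, size_t bytes)
{
	r->virt = pci_alloc_consistent(pdev, bytes, &r->dma);
	return r->virt ? 0 : -ENOMEM;
}

static void ex_free_ring(struct pci_dev *pdev, struct ex_ring *r, size_t bytes)
{
	if (r->virt)	/* must pass the same size used at allocation time */
		pci_free_consistent(pdev, bytes, r->virt, r->dma);
}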
@@ -4758,7 +4681,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 #if LINUX_VERSION_CODE >= 0x020600
  error_disable_adapter:
-	WRT_REG_WORD(&ha->iobase->ictrl, 0);
+	qla1280_disable_intrs(ha);
 #endif
  error_free_irq:
 	free_irq(pdev->irq, ha);
@@ -4770,11 +4693,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 #endif
  error_free_response_ring:
 	pci_free_consistent(ha->pdev,
-			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
+			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
 			ha->response_ring, ha->response_dma);
  error_free_request_ring:
 	pci_free_consistent(ha->pdev,
-			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
+			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
 			ha->request_ring, ha->request_dma);
  error_put_host:
 	scsi_host_put(host);
@@ -4795,7 +4718,7 @@ qla1280_remove_one(struct pci_dev *pdev)
 	scsi_remove_host(host);
 #endif
 
-	WRT_REG_WORD(&ha->iobase->ictrl, 0);
+	qla1280_disable_intrs(ha);
 
 	free_irq(pdev->irq, ha);
 
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d245ae07518e..59915fb70301 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -94,9 +94,6 @@
 #define REQUEST_ENTRY_CNT		256	/* Number of request entries. */
 #define RESPONSE_ENTRY_CNT		16	/* Number of response entries. */
 
-/* Number of segments 1 - 65535 */
-#define SG_SEGMENTS	32		/* Cmd entry + 6 continuations */
-
 /*
  * SCSI Request Block structure  (sp)  that is placed
  * on cmd->SCp location of every I/O
@@ -378,29 +375,23 @@ struct nvram {
 	uint16_t unused_12;	/* 12, 13 */
 	uint16_t unused_14;	/* 14, 15 */
 
-	union {
-		uint8_t c;
-		struct {
-			uint8_t reserved:2;
-			uint8_t burst_enable:1;
-			uint8_t reserved_1:1;
-			uint8_t fifo_threshold:4;
-		} f;
+	struct {
+		uint8_t reserved:2;
+		uint8_t burst_enable:1;
+		uint8_t reserved_1:1;
+		uint8_t fifo_threshold:4;
 	} isp_config;		/* 16 */
 
 	/* Termination
 	 * 0 = Disable, 1 = high only, 3 = Auto term
 	 */
-	union {
-		uint8_t c;
-		struct {
-			uint8_t scsi_bus_1_control:2;
-			uint8_t scsi_bus_0_control:2;
-			uint8_t unused_0:1;
-			uint8_t unused_1:1;
-			uint8_t unused_2:1;
-			uint8_t auto_term_support:1;
-		} f;
+	struct {
+		uint8_t scsi_bus_1_control:2;
+		uint8_t scsi_bus_0_control:2;
+		uint8_t unused_0:1;
+		uint8_t unused_1:1;
+		uint8_t unused_2:1;
+		uint8_t auto_term_support:1;
 	} termination;		/* 17 */
 
 	uint16_t isp_parameter;	/* 18, 19 */
@@ -460,18 +451,15 @@ struct nvram {
 	uint16_t unused_38;	/* 38, 39 */
 
 	struct {
-		union {
-			uint8_t c;
-			struct {
-				uint8_t renegotiate_on_error:1;
-				uint8_t stop_queue_on_check:1;
-				uint8_t auto_request_sense:1;
-				uint8_t tag_queuing:1;
-				uint8_t enable_sync:1;
-				uint8_t enable_wide:1;
-				uint8_t parity_checking:1;
-				uint8_t disconnect_allowed:1;
-			} f;
+		struct {
+			uint8_t renegotiate_on_error:1;
+			uint8_t stop_queue_on_check:1;
+			uint8_t auto_request_sense:1;
+			uint8_t tag_queuing:1;
+			uint8_t enable_sync:1;
+			uint8_t enable_wide:1;
+			uint8_t parity_checking:1;
+			uint8_t disconnect_allowed:1;
 		} parameter;	/* 40 */
 
 		uint8_t execution_throttle;	/* 41 */
@@ -528,23 +516,23 @@ struct cmd_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t handle;		/* System handle. */
+	__le32 handle;			/* System handle. */
 	uint8_t lun;			/* SCSI LUN */
 	uint8_t target;			/* SCSI ID */
-	uint16_t cdb_len;		/* SCSI command length. */
-	uint16_t control_flags;		/* Control flags. */
-	uint16_t reserved;
-	uint16_t timeout;		/* Command timeout. */
-	uint16_t dseg_count;		/* Data segment count. */
+	__le16 cdb_len;			/* SCSI command length. */
+	__le16 control_flags;		/* Control flags. */
+	__le16 reserved;
+	__le16 timeout;			/* Command timeout. */
+	__le16 dseg_count;		/* Data segment count. */
 	uint8_t scsi_cdb[MAX_CMDSZ];	/* SCSI command words. */
-	uint32_t dseg_0_address;	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address;	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address;	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
-	uint32_t dseg_3_address;	/* Data segment 3 address. */
-	uint32_t dseg_3_length;		/* Data segment 3 length. */
+	__le32 dseg_0_address;		/* Data segment 0 address. */
+	__le32 dseg_0_length;		/* Data segment 0 length. */
+	__le32 dseg_1_address;		/* Data segment 1 address. */
+	__le32 dseg_1_length;		/* Data segment 1 length. */
+	__le32 dseg_2_address;		/* Data segment 2 address. */
+	__le32 dseg_2_length;		/* Data segment 2 length. */
+	__le32 dseg_3_address;		/* Data segment 3 address. */
+	__le32 dseg_3_length;		/* Data segment 3 length. */
 };
 
 /*
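Switching these on-the-wire members from uint16_t/uint32_t to __le16/__le32 does not change the structure layout; it gives sparse (make C=1) the type information needed to flag any access that skips a cpu_to_le*()/le*_to_cpu() conversion. A minimal sketch of the idea with a hypothetical two-field entry:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical two-field firmware entry, annotated like the real ones. */
struct ex_entry {
	__le32 handle;		/* little-endian on the wire */
	__le16 timeout;
};

static void ex_fill_entry(struct ex_entry *e, u32 handle, u16 timeout)
{
	e->handle  = cpu_to_le32(handle);	/* explicit CPU-to-LE conversion */
	e->timeout = cpu_to_le16(timeout);
	/* e->handle = handle;   <- sparse (make C=1) would warn here */
}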
@@ -556,21 +544,21 @@ struct cont_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved;		/* Reserved */
-	uint32_t dseg_0_address;	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address;	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address;	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
-	uint32_t dseg_3_address;	/* Data segment 3 address. */
-	uint32_t dseg_3_length;		/* Data segment 3 length. */
-	uint32_t dseg_4_address;	/* Data segment 4 address. */
-	uint32_t dseg_4_length;		/* Data segment 4 length. */
-	uint32_t dseg_5_address;	/* Data segment 5 address. */
-	uint32_t dseg_5_length;		/* Data segment 5 length. */
-	uint32_t dseg_6_address;	/* Data segment 6 address. */
-	uint32_t dseg_6_length;		/* Data segment 6 length. */
+	__le32 reserved;		/* Reserved */
+	__le32 dseg_0_address;		/* Data segment 0 address. */
+	__le32 dseg_0_length;		/* Data segment 0 length. */
+	__le32 dseg_1_address;		/* Data segment 1 address. */
+	__le32 dseg_1_length;		/* Data segment 1 length. */
+	__le32 dseg_2_address;		/* Data segment 2 address. */
+	__le32 dseg_2_length;		/* Data segment 2 length. */
+	__le32 dseg_3_address;		/* Data segment 3 address. */
+	__le32 dseg_3_length;		/* Data segment 3 length. */
+	__le32 dseg_4_address;		/* Data segment 4 address. */
+	__le32 dseg_4_length;		/* Data segment 4 length. */
+	__le32 dseg_5_address;		/* Data segment 5 address. */
+	__le32 dseg_5_length;		/* Data segment 5 length. */
+	__le32 dseg_6_address;		/* Data segment 6 address. */
+	__le32 dseg_6_length;		/* Data segment 6 length. */
 };
 
 /*
@@ -586,22 +574,22 @@ struct response {
 #define RF_FULL		BIT_1	/* Full */
 #define RF_BAD_HEADER	BIT_2	/* Bad header. */
 #define RF_BAD_PAYLOAD	BIT_3	/* Bad payload. */
-	uint32_t handle;	/* System handle. */
-	uint16_t scsi_status;	/* SCSI status. */
-	uint16_t comp_status;	/* Completion status. */
-	uint16_t state_flags;	/* State flags. */
+	__le32 handle;		/* System handle. */
+	__le16 scsi_status;	/* SCSI status. */
+	__le16 comp_status;	/* Completion status. */
+	__le16 state_flags;	/* State flags. */
 #define SF_TRANSFER_CMPL	BIT_14	/* Transfer Complete. */
 #define SF_GOT_SENSE		BIT_13	/* Got Sense */
 #define SF_GOT_STATUS		BIT_12	/* Got Status */
 #define SF_TRANSFERRED_DATA	BIT_11	/* Transferred data */
 #define SF_SENT_CDB		BIT_10	/* Send CDB */
 #define SF_GOT_TARGET		BIT_9	/*  */
 #define SF_GOT_BUS		BIT_8	/*  */
-	uint16_t status_flags;	/* Status flags. */
-	uint16_t time;		/* Time. */
-	uint16_t req_sense_length;	/* Request sense data length. */
-	uint32_t residual_length;	/* Residual transfer length. */
-	uint16_t reserved[4];
+	__le16 status_flags;	/* Status flags. */
+	__le16 time;		/* Time. */
+	__le16 req_sense_length;/* Request sense data length. */
+	__le32 residual_length;	/* Residual transfer length. */
+	__le16 reserved[4];
 	uint8_t req_sense_data[32];	/* Request sense data. */
 };
 
@@ -614,7 +602,7 @@ struct mrk_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved;
+	__le32 reserved;
 	uint8_t lun;			/* SCSI LUN */
 	uint8_t target;			/* SCSI ID */
 	uint8_t modifier;		/* Modifier (7-0). */
@@ -638,11 +626,11 @@ struct ecmd_entry {
 	uint32_t handle;		/* System handle. */
 	uint8_t lun;			/* SCSI LUN */
 	uint8_t target;			/* SCSI ID */
-	uint16_t cdb_len;		/* SCSI command length. */
-	uint16_t control_flags;		/* Control flags. */
-	uint16_t reserved;
-	uint16_t timeout;		/* Command timeout. */
-	uint16_t dseg_count;		/* Data segment count. */
+	__le16 cdb_len;			/* SCSI command length. */
+	__le16 control_flags;		/* Control flags. */
+	__le16 reserved;
+	__le16 timeout;			/* Command timeout. */
+	__le16 dseg_count;		/* Data segment count. */
 	uint8_t scsi_cdb[88];		/* SCSI command words. */
 };
 
@@ -655,20 +643,20 @@ typedef struct {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t handle;		/* System handle. */
+	__le32 handle;			/* System handle. */
 	uint8_t lun;			/* SCSI LUN */
 	uint8_t target;			/* SCSI ID */
-	uint16_t cdb_len;		/* SCSI command length. */
-	uint16_t control_flags;		/* Control flags. */
-	uint16_t reserved;
-	uint16_t timeout;		/* Command timeout. */
-	uint16_t dseg_count;		/* Data segment count. */
+	__le16 cdb_len;			/* SCSI command length. */
+	__le16 control_flags;		/* Control flags. */
+	__le16 reserved;
+	__le16 timeout;			/* Command timeout. */
+	__le16 dseg_count;		/* Data segment count. */
 	uint8_t scsi_cdb[MAX_CMDSZ];	/* SCSI command words. */
-	uint32_t reserved_1[2];		/* unused */
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	__le32 reserved_1[2];		/* unused */
+	__le32 dseg_0_address[2];	/* Data segment 0 address. */
+	__le32 dseg_0_length;		/* Data segment 0 length. */
+	__le32 dseg_1_address[2];	/* Data segment 1 address. */
+	__le32 dseg_1_length;		/* Data segment 1 length. */
 } cmd_a64_entry_t, request_t;
 
 /*
@@ -680,16 +668,16 @@ struct cont_a64_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address[2];	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
-	uint32_t dseg_3_address[2];	/* Data segment 3 address. */
-	uint32_t dseg_3_length;		/* Data segment 3 length. */
-	uint32_t dseg_4_address[2];	/* Data segment 4 address. */
-	uint32_t dseg_4_length;		/* Data segment 4 length. */
+	__le32 dseg_0_address[2];	/* Data segment 0 address. */
+	__le32 dseg_0_length;		/* Data segment 0 length. */
+	__le32 dseg_1_address[2];	/* Data segment 1 address. */
+	__le32 dseg_1_length;		/* Data segment 1 length. */
+	__le32 dseg_2_address[2];	/* Data segment 2 address. */
+	__le32 dseg_2_length;		/* Data segment 2 length. */
+	__le32 dseg_3_address[2];	/* Data segment 3 address. */
+	__le32 dseg_3_length;		/* Data segment 3 length. */
+	__le32 dseg_4_address[2];	/* Data segment 4 address. */
+	__le32 dseg_4_length;		/* Data segment 4 length. */
 };
 
 /*
@@ -701,10 +689,10 @@ struct elun_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t reserved_1;
 	uint8_t entry_status;		/* Entry Status not used. */
-	uint32_t reserved_2;
-	uint16_t lun;			/* Bit 15 is bus number. */
-	uint16_t reserved_4;
-	uint32_t option_flags;
+	__le32 reserved_2;
+	__le16 lun;			/* Bit 15 is bus number. */
+	__le16 reserved_4;
+	__le32 option_flags;
 	uint8_t status;
 	uint8_t reserved_5;
 	uint8_t command_count;		/* Number of ATIOs allocated. */
@@ -714,8 +702,8 @@ struct elun_entry {
 					/* commands (2-26). */
 	uint8_t group_7_length;		/* SCSI CDB length for group 7 */
 					/* commands (2-26). */
-	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
-	uint16_t reserved_6[20];
+	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
+	__le16 reserved_6[20];
 };
 
 /*
@@ -729,20 +717,20 @@ struct modify_lun_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t reserved_1;
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved_2;
+	__le32 reserved_2;
 	uint8_t lun;			/* SCSI LUN */
 	uint8_t reserved_3;
 	uint8_t operators;
 	uint8_t reserved_4;
-	uint32_t option_flags;
+	__le32 option_flags;
 	uint8_t status;
 	uint8_t reserved_5;
 	uint8_t command_count;		/* Number of ATIOs allocated. */
 	uint8_t immed_notify_count;	/* Number of Immediate Notify */
 					/* entries allocated. */
-	uint16_t reserved_6;
-	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
-	uint16_t reserved_7[20];
+	__le16 reserved_6;
+	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
+	__le16 reserved_7[20];
 };
 
 /*
@@ -754,20 +742,20 @@ struct notify_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t reserved_1;
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved_2;
+	__le32 reserved_2;
 	uint8_t lun;
 	uint8_t initiator_id;
 	uint8_t reserved_3;
 	uint8_t target_id;
-	uint32_t option_flags;
+	__le32 option_flags;
 	uint8_t status;
 	uint8_t reserved_4;
 	uint8_t tag_value;	/* Received queue tag message value */
 	uint8_t tag_type;	/* Received queue tag message type */
 				/* entries allocated. */
-	uint16_t seq_id;
+	__le16 seq_id;
 	uint8_t scsi_msg[8];	/* SCSI message not handled by ISP */
-	uint16_t reserved_5[8];
+	__le16 reserved_5[8];
 	uint8_t sense_data[18];
 };
 
@@ -780,16 +768,16 @@ struct nack_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t reserved_1;
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved_2;
+	__le32 reserved_2;
 	uint8_t lun;
 	uint8_t initiator_id;
 	uint8_t reserved_3;
 	uint8_t target_id;
-	uint32_t option_flags;
+	__le32 option_flags;
 	uint8_t status;
 	uint8_t event;
-	uint16_t seq_id;
-	uint16_t reserved_4[22];
+	__le16 seq_id;
+	__le16 reserved_4[22];
 };
 
 /*
@@ -801,12 +789,12 @@ struct atio_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t reserved_1;
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved_2;
+	__le32 reserved_2;
 	uint8_t lun;
 	uint8_t initiator_id;
 	uint8_t cdb_len;
 	uint8_t target_id;
-	uint32_t option_flags;
+	__le32 option_flags;
 	uint8_t status;
 	uint8_t scsi_status;
 	uint8_t tag_value;	/* Received queue tag message value */
@@ -824,28 +812,28 @@ struct ctio_entry {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t reserved_1;
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t reserved_2;
+	__le32 reserved_2;
 	uint8_t lun;			/* SCSI LUN */
 	uint8_t initiator_id;
 	uint8_t reserved_3;
 	uint8_t target_id;
-	uint32_t option_flags;
+	__le32 option_flags;
 	uint8_t status;
 	uint8_t scsi_status;
 	uint8_t tag_value;	/* Received queue tag message value */
 	uint8_t tag_type;	/* Received queue tag message type */
-	uint32_t transfer_length;
-	uint32_t residual;
-	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
-	uint16_t dseg_count;		/* Data segment count. */
-	uint32_t dseg_0_address;	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address;	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address;	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
-	uint32_t dseg_3_address;	/* Data segment 3 address. */
-	uint32_t dseg_3_length;		/* Data segment 3 length. */
+	__le32 transfer_length;
+	__le32 residual;
+	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
+	__le16 dseg_count;		/* Data segment count. */
+	__le32 dseg_0_address;		/* Data segment 0 address. */
+	__le32 dseg_0_length;		/* Data segment 0 length. */
+	__le32 dseg_1_address;		/* Data segment 1 address. */
+	__le32 dseg_1_length;		/* Data segment 1 length. */
+	__le32 dseg_2_address;		/* Data segment 2 address. */
+	__le32 dseg_2_length;		/* Data segment 2 length. */
+	__le32 dseg_3_address;		/* Data segment 3 address. */
+	__le32 dseg_3_length;		/* Data segment 3 length. */
 };
 
 /*
@@ -857,24 +845,24 @@ struct ctio_ret_entry {
857 uint8_t entry_count; /* Entry count. */ 845 uint8_t entry_count; /* Entry count. */
858 uint8_t reserved_1; 846 uint8_t reserved_1;
859 uint8_t entry_status; /* Entry Status. */ 847 uint8_t entry_status; /* Entry Status. */
860 uint32_t reserved_2; 848 __le32 reserved_2;
861 uint8_t lun; /* SCSI LUN */ 849 uint8_t lun; /* SCSI LUN */
862 uint8_t initiator_id; 850 uint8_t initiator_id;
863 uint8_t reserved_3; 851 uint8_t reserved_3;
864 uint8_t target_id; 852 uint8_t target_id;
865 uint32_t option_flags; 853 __le32 option_flags;
866 uint8_t status; 854 uint8_t status;
867 uint8_t scsi_status; 855 uint8_t scsi_status;
868 uint8_t tag_value; /* Received queue tag message value */ 856 uint8_t tag_value; /* Received queue tag message value */
869 uint8_t tag_type; /* Received queue tag message type */ 857 uint8_t tag_type; /* Received queue tag message type */
870 uint32_t transfer_length; 858 __le32 transfer_length;
871 uint32_t residual; 859 __le32 residual;
872 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 860 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
873 uint16_t dseg_count; /* Data segment count. */ 861 __le16 dseg_count; /* Data segment count. */
874 uint32_t dseg_0_address; /* Data segment 0 address. */ 862 __le32 dseg_0_address; /* Data segment 0 address. */
875 uint32_t dseg_0_length; /* Data segment 0 length. */ 863 __le32 dseg_0_length; /* Data segment 0 length. */
876 uint32_t dseg_1_address; /* Data segment 1 address. */ 864 __le32 dseg_1_address; /* Data segment 1 address. */
877 uint16_t dseg_1_length; /* Data segment 1 length. */ 865 __le16 dseg_1_length; /* Data segment 1 length. */
878 uint8_t sense_data[18]; 866 uint8_t sense_data[18];
879}; 867};
880 868
@@ -887,25 +875,25 @@ struct ctio_a64_entry {
887 uint8_t entry_count; /* Entry count. */ 875 uint8_t entry_count; /* Entry count. */
888 uint8_t reserved_1; 876 uint8_t reserved_1;
889 uint8_t entry_status; /* Entry Status. */ 877 uint8_t entry_status; /* Entry Status. */
890 uint32_t reserved_2; 878 __le32 reserved_2;
891 uint8_t lun; /* SCSI LUN */ 879 uint8_t lun; /* SCSI LUN */
892 uint8_t initiator_id; 880 uint8_t initiator_id;
893 uint8_t reserved_3; 881 uint8_t reserved_3;
894 uint8_t target_id; 882 uint8_t target_id;
895 uint32_t option_flags; 883 __le32 option_flags;
896 uint8_t status; 884 uint8_t status;
897 uint8_t scsi_status; 885 uint8_t scsi_status;
898 uint8_t tag_value; /* Received queue tag message value */ 886 uint8_t tag_value; /* Received queue tag message value */
899 uint8_t tag_type; /* Received queue tag message type */ 887 uint8_t tag_type; /* Received queue tag message type */
900 uint32_t transfer_length; 888 __le32 transfer_length;
901 uint32_t residual; 889 __le32 residual;
902 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 890 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
903 uint16_t dseg_count; /* Data segment count. */ 891 __le16 dseg_count; /* Data segment count. */
904 uint32_t reserved_4[2]; 892 __le32 reserved_4[2];
905 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 893 __le32 dseg_0_address[2];/* Data segment 0 address. */
906 uint32_t dseg_0_length; /* Data segment 0 length. */ 894 __le32 dseg_0_length; /* Data segment 0 length. */
907 uint32_t dseg_1_address[2]; /* Data segment 1 address. */ 895 __le32 dseg_1_address[2];/* Data segment 1 address. */
908 uint32_t dseg_1_length; /* Data segment 1 length. */ 896 __le32 dseg_1_length; /* Data segment 1 length. */
909}; 897};
910 898
911/* 899/*
@@ -917,21 +905,21 @@ struct ctio_a64_ret_entry {
917 uint8_t entry_count; /* Entry count. */ 905 uint8_t entry_count; /* Entry count. */
918 uint8_t reserved_1; 906 uint8_t reserved_1;
919 uint8_t entry_status; /* Entry Status. */ 907 uint8_t entry_status; /* Entry Status. */
920 uint32_t reserved_2; 908 __le32 reserved_2;
921 uint8_t lun; /* SCSI LUN */ 909 uint8_t lun; /* SCSI LUN */
922 uint8_t initiator_id; 910 uint8_t initiator_id;
923 uint8_t reserved_3; 911 uint8_t reserved_3;
924 uint8_t target_id; 912 uint8_t target_id;
925 uint32_t option_flags; 913 __le32 option_flags;
926 uint8_t status; 914 uint8_t status;
927 uint8_t scsi_status; 915 uint8_t scsi_status;
928 uint8_t tag_value; /* Received queue tag message value */ 916 uint8_t tag_value; /* Received queue tag message value */
929 uint8_t tag_type; /* Received queue tag message type */ 917 uint8_t tag_type; /* Received queue tag message type */
930 uint32_t transfer_length; 918 __le32 transfer_length;
931 uint32_t residual; 919 __le32 residual;
932 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 920 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
933 uint16_t dseg_count; /* Data segment count. */ 921 __le16 dseg_count; /* Data segment count. */
934 uint16_t reserved_4[7]; 922 __le16 reserved_4[7];
935 uint8_t sense_data[18]; 923 uint8_t sense_data[18];
936}; 924};
937 925
@@ -979,14 +967,6 @@ struct ctio_a64_ret_entry {
979#define CS_RETRY 0x82 /* Driver defined */ 967#define CS_RETRY 0x82 /* Driver defined */
980 968
981/* 969/*
982 * ISP status entry - SCSI status byte bit definitions.
983 */
984#define SS_CHECK_CONDITION BIT_1
985#define SS_CONDITION_MET BIT_2
986#define SS_BUSY_CONDITION BIT_3
987#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3)
988
989/*
990 * ISP target entries - Option flags bit definitions. 970 * ISP target entries - Option flags bit definitions.
991 */ 971 */
992#define OF_ENABLE_TAG BIT_1 /* Tagged queue action enable */ 972#define OF_ENABLE_TAG BIT_1 /* Tagged queue action enable */
@@ -1082,10 +1062,6 @@ struct scsi_qla_host {
1082 uint32_t reset_active:1; /* 3 */ 1062 uint32_t reset_active:1; /* 3 */
1083 uint32_t abort_isp_active:1; /* 4 */ 1063 uint32_t abort_isp_active:1; /* 4 */
1084 uint32_t disable_risc_code_load:1; /* 5 */ 1064 uint32_t disable_risc_code_load:1; /* 5 */
1085 uint32_t enable_64bit_addressing:1; /* 6 */
1086 uint32_t in_reset:1; /* 7 */
1087 uint32_t ints_enabled:1;
1088 uint32_t ignore_nvram:1;
1089#ifdef __ia64__ 1065#ifdef __ia64__
1090 uint32_t use_pci_vchannel:1; 1066 uint32_t use_pci_vchannel:1;
1091#endif 1067#endif
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 659a5d63467d..fe0fce71adc7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -211,6 +211,138 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
211 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 211 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
212} 212}
213 213
214/* Scsi_Host attributes. */
215
216static ssize_t
217qla2x00_drvr_version_show(struct class_device *cdev, char *buf)
218{
219 return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
220}
221
222static ssize_t
223qla2x00_fw_version_show(struct class_device *cdev, char *buf)
224{
225 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
226 char fw_str[30];
227
228 return snprintf(buf, PAGE_SIZE, "%s\n",
229 ha->isp_ops.fw_version_str(ha, fw_str));
230}
231
232static ssize_t
233qla2x00_serial_num_show(struct class_device *cdev, char *buf)
234{
235 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
236 uint32_t sn;
237
238 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
239 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
240 sn % 100000);
241}
242
243static ssize_t
244qla2x00_isp_name_show(struct class_device *cdev, char *buf)
245{
246 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
247 return snprintf(buf, PAGE_SIZE, "%s\n", ha->brd_info->isp_name);
248}
249
250static ssize_t
251qla2x00_isp_id_show(struct class_device *cdev, char *buf)
252{
253 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
254 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
255 ha->product_id[0], ha->product_id[1], ha->product_id[2],
256 ha->product_id[3]);
257}
258
259static ssize_t
260qla2x00_model_name_show(struct class_device *cdev, char *buf)
261{
262 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
263 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
264}
265
266static ssize_t
267qla2x00_model_desc_show(struct class_device *cdev, char *buf)
268{
269 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
270 return snprintf(buf, PAGE_SIZE, "%s\n",
271 ha->model_desc ? ha->model_desc: "");
272}
273
274static ssize_t
275qla2x00_pci_info_show(struct class_device *cdev, char *buf)
276{
277 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
278 char pci_info[30];
279
280 return snprintf(buf, PAGE_SIZE, "%s\n",
281 ha->isp_ops.pci_info_str(ha, pci_info));
282}
283
284static ssize_t
285qla2x00_state_show(struct class_device *cdev, char *buf)
286{
287 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
288 int len = 0;
289
290 if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
291 atomic_read(&ha->loop_state) == LOOP_DEAD)
292 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
293 else if (atomic_read(&ha->loop_state) != LOOP_READY ||
294 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
295 test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
296 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
297 else {
298 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
299
300 switch (ha->current_topology) {
301 case ISP_CFG_NL:
302 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
303 break;
304 case ISP_CFG_FL:
305 len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
306 break;
307 case ISP_CFG_N:
308 len += snprintf(buf + len, PAGE_SIZE-len,
309 "N_Port to N_Port\n");
310 break;
311 case ISP_CFG_F:
312 len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
313 break;
314 default:
315 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
316 break;
317 }
318 }
319 return len;
320}
321
322static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
323 NULL);
324static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
325static CLASS_DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
326static CLASS_DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
327static CLASS_DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
328static CLASS_DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
329static CLASS_DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
330static CLASS_DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
331static CLASS_DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
332
333struct class_device_attribute *qla2x00_host_attrs[] = {
334 &class_device_attr_driver_version,
335 &class_device_attr_fw_version,
336 &class_device_attr_serial_num,
337 &class_device_attr_isp_name,
338 &class_device_attr_isp_id,
339 &class_device_attr_model_name,
340 &class_device_attr_model_desc,
341 &class_device_attr_pci_info,
342 &class_device_attr_state,
343 NULL,
344};
345
214/* Host attributes. */ 346/* Host attributes. */
215 347
216static void 348static void
@@ -304,10 +436,13 @@ struct fc_function_template qla2xxx_transport_functions = {
304 436
305 .show_host_node_name = 1, 437 .show_host_node_name = 1,
306 .show_host_port_name = 1, 438 .show_host_port_name = 1,
439 .show_host_supported_classes = 1,
440
307 .get_host_port_id = qla2x00_get_host_port_id, 441 .get_host_port_id = qla2x00_get_host_port_id,
308 .show_host_port_id = 1, 442 .show_host_port_id = 1,
309 443
310 .dd_fcrport_size = sizeof(struct fc_port *), 444 .dd_fcrport_size = sizeof(struct fc_port *),
445 .show_rport_supported_classes = 1,
311 446
312 .get_starget_node_name = qla2x00_get_starget_node_name, 447 .get_starget_node_name = qla2x00_get_starget_node_name,
313 .show_starget_node_name = 1, 448 .show_starget_node_name = 1,
@@ -329,4 +464,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
329 be64_to_cpu(*(uint64_t *)ha->init_cb->node_name); 464 be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
330 fc_host_port_name(ha->host) = 465 fc_host_port_name(ha->host) =
331 be64_to_cpu(*(uint64_t *)ha->init_cb->port_name); 466 be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
467 fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
332} 468}
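
The serial_num attribute added above (and reused later by the FDMI RHBA handler in qla_gs.c) packs three NVRAM bytes into a 21-bit value and renders it as one letter plus five digits. A minimal, standalone sketch of that encoding, assuming the same serial0/serial1/serial2 layout; the helper name is illustrative and not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: reproduce the serial-number encoding used by
 * qla2x00_serial_num_show() -- bits 4:0 of serial0 form the high bits,
 * serial2 the middle byte and serial1 the low byte of a 21-bit value. */
static void qla2xxx_fmt_serial(uint8_t s0, uint8_t s1, uint8_t s2,
			       char *buf, size_t len)
{
	uint32_t sn = ((uint32_t)(s0 & 0x1f) << 16) | ((uint32_t)s2 << 8) | s1;

	snprintf(buf, len, "%c%05u", 'A' + sn / 100000, sn % 100000);
}
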
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index b8d90e97e017..9684e7a91fa9 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -81,6 +81,7 @@
81#define DEBUG2_3_11(x) do {x;} while (0); 81#define DEBUG2_3_11(x) do {x;} while (0);
82#define DEBUG2_9_10(x) do {x;} while (0); 82#define DEBUG2_9_10(x) do {x;} while (0);
83#define DEBUG2_11(x) do {x;} while (0); 83#define DEBUG2_11(x) do {x;} while (0);
84#define DEBUG2_13(x) do {x;} while (0);
84#else 85#else
85#define DEBUG2(x) do {} while (0); 86#define DEBUG2(x) do {} while (0);
86#endif 87#endif
@@ -169,8 +170,14 @@
169 170
170#if defined(QL_DEBUG_LEVEL_13) 171#if defined(QL_DEBUG_LEVEL_13)
171#define DEBUG13(x) do {x;} while (0) 172#define DEBUG13(x) do {x;} while (0)
173#if !defined(DEBUG2_13)
174#define DEBUG2_13(x) do {x;} while(0)
175#endif
172#else 176#else
173#define DEBUG13(x) do {} while (0) 177#define DEBUG13(x) do {} while (0)
178#if !defined(QL_DEBUG_LEVEL_2)
179#define DEBUG2_13(x) do {} while(0)
180#endif
174#endif 181#endif
175 182
176#if defined(QL_DEBUG_LEVEL_14) 183#if defined(QL_DEBUG_LEVEL_14)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1c6d366f4fad..b455c31405e4 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -214,6 +214,7 @@
214 * valid range of an N-PORT id is 0 through 0x7ef. 214 * valid range of an N-PORT id is 0 through 0x7ef.
215 */ 215 */
216#define NPH_LAST_HANDLE 0x7ef 216#define NPH_LAST_HANDLE 0x7ef
217#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
217#define NPH_SNS 0x7fc /* FFFFFC */ 218#define NPH_SNS 0x7fc /* FFFFFC */
218#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */ 219#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
219#define NPH_F_PORT 0x7fe /* FFFFFE */ 220#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -630,6 +631,7 @@ typedef struct {
630#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */ 631#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */
631#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */ 632#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */
632#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */ 633#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */
634#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware. */
633#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */ 635#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */
634#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */ 636#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */
635#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */ 637#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */
@@ -913,7 +915,7 @@ typedef struct {
913 * MSB BIT 1 = 915 * MSB BIT 1 =
914 * MSB BIT 2 = 916 * MSB BIT 2 =
915 * MSB BIT 3 = 917 * MSB BIT 3 =
916 * MSB BIT 4 = 918 * MSB BIT 4 = LED mode
917 * MSB BIT 5 = enable 50 ohm termination 919 * MSB BIT 5 = enable 50 ohm termination
918 * MSB BIT 6 = Data Rate (2300 only) 920 * MSB BIT 6 = Data Rate (2300 only)
919 * MSB BIT 7 = Data Rate (2300 only) 921 * MSB BIT 7 = Data Rate (2300 only)
@@ -1035,7 +1037,7 @@ typedef struct {
1035 * MSB BIT 1 = 1037 * MSB BIT 1 =
1036 * MSB BIT 2 = 1038 * MSB BIT 2 =
1037 * MSB BIT 3 = 1039 * MSB BIT 3 =
1038 * MSB BIT 4 = 1040 * MSB BIT 4 = LED mode
1039 * MSB BIT 5 = enable 50 ohm termination 1041 * MSB BIT 5 = enable 50 ohm termination
1040 * MSB BIT 6 = Data Rate (2300 only) 1042 * MSB BIT 6 = Data Rate (2300 only)
1041 * MSB BIT 7 = Data Rate (2300 only) 1043 * MSB BIT 7 = Data Rate (2300 only)
@@ -1131,10 +1133,7 @@ typedef struct {
1131 1133
1132 uint8_t link_down_timeout; 1134 uint8_t link_down_timeout;
1133 1135
1134 uint8_t adapter_id_0[4]; 1136 uint8_t adapter_id[16];
1135 uint8_t adapter_id_1[4];
1136 uint8_t adapter_id_2[4];
1137 uint8_t adapter_id_3[4];
1138 1137
1139 uint8_t alt1_boot_node_name[WWN_SIZE]; 1138 uint8_t alt1_boot_node_name[WWN_SIZE];
1140 uint16_t alt1_boot_lun_number; 1139 uint16_t alt1_boot_lun_number;
@@ -1673,6 +1672,7 @@ typedef struct fc_port {
1673 uint8_t cur_path; /* current path id */ 1672 uint8_t cur_path; /* current path id */
1674 1673
1675 struct fc_rport *rport; 1674 struct fc_rport *rport;
1675 u32 supported_classes;
1676} fc_port_t; 1676} fc_port_t;
1677 1677
1678/* 1678/*
@@ -1727,6 +1727,8 @@ typedef struct fc_port {
1727 1727
1728#define CT_REJECT_RESPONSE 0x8001 1728#define CT_REJECT_RESPONSE 0x8001
1729#define CT_ACCEPT_RESPONSE 0x8002 1729#define CT_ACCEPT_RESPONSE 0x8002
1730#define CT_REASON_CANNOT_PERFORM 0x09
1731#define CT_EXPL_ALREADY_REGISTERED 0x10
1730 1732
1731#define NS_N_PORT_TYPE 0x01 1733#define NS_N_PORT_TYPE 0x01
1732#define NS_NL_PORT_TYPE 0x02 1734#define NS_NL_PORT_TYPE 0x02
@@ -1768,6 +1770,100 @@ typedef struct fc_port {
1768#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255) 1770#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
1769#define RSNN_NN_RSP_SIZE 16 1771#define RSNN_NN_RSP_SIZE 16
1770 1772
1773/*
1774 * HBA attribute types.
1775 */
1776#define FDMI_HBA_ATTR_COUNT 9
1777#define FDMI_HBA_NODE_NAME 1
1778#define FDMI_HBA_MANUFACTURER 2
1779#define FDMI_HBA_SERIAL_NUMBER 3
1780#define FDMI_HBA_MODEL 4
1781#define FDMI_HBA_MODEL_DESCRIPTION 5
1782#define FDMI_HBA_HARDWARE_VERSION 6
1783#define FDMI_HBA_DRIVER_VERSION 7
1784#define FDMI_HBA_OPTION_ROM_VERSION 8
1785#define FDMI_HBA_FIRMWARE_VERSION 9
1786#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
1787#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
1788
1789struct ct_fdmi_hba_attr {
1790 uint16_t type;
1791 uint16_t len;
1792 union {
1793 uint8_t node_name[WWN_SIZE];
1794 uint8_t manufacturer[32];
1795 uint8_t serial_num[8];
1796 uint8_t model[16];
1797 uint8_t model_desc[80];
1798 uint8_t hw_version[16];
1799 uint8_t driver_version[32];
1800 uint8_t orom_version[16];
1801 uint8_t fw_version[16];
1802 uint8_t os_version[128];
1803 uint8_t max_ct_len[4];
1804 } a;
1805};
1806
1807struct ct_fdmi_hba_attributes {
1808 uint32_t count;
1809 struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
1810};
1811
1812/*
1813 * Port attribute types.
1814 */
1815#define FDMI_PORT_ATTR_COUNT 5
1816#define FDMI_PORT_FC4_TYPES 1
1817#define FDMI_PORT_SUPPORT_SPEED 2
1818#define FDMI_PORT_CURRENT_SPEED 3
1819#define FDMI_PORT_MAX_FRAME_SIZE 4
1820#define FDMI_PORT_OS_DEVICE_NAME 5
1821#define FDMI_PORT_HOST_NAME 6
1822
1823struct ct_fdmi_port_attr {
1824 uint16_t type;
1825 uint16_t len;
1826 union {
1827 uint8_t fc4_types[32];
1828 uint32_t sup_speed;
1829 uint32_t cur_speed;
1830 uint32_t max_frame_size;
1831 uint8_t os_dev_name[32];
1832 uint8_t host_name[32];
1833 } a;
1834};
1835
1836/*
1837 * Port Attribute Block.
1838 */
1839struct ct_fdmi_port_attributes {
1840 uint32_t count;
1841 struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
1842};
1843
1844/* FDMI definitions. */
1845#define GRHL_CMD 0x100
1846#define GHAT_CMD 0x101
1847#define GRPL_CMD 0x102
1848#define GPAT_CMD 0x110
1849
1850#define RHBA_CMD 0x200
1851#define RHBA_RSP_SIZE 16
1852
1853#define RHAT_CMD 0x201
1854#define RPRT_CMD 0x210
1855
1856#define RPA_CMD 0x211
1857#define RPA_RSP_SIZE 16
1858
1859#define DHBA_CMD 0x300
1860#define DHBA_REQ_SIZE (16 + 8)
1861#define DHBA_RSP_SIZE 16
1862
1863#define DHAT_CMD 0x301
1864#define DPRT_CMD 0x310
1865#define DPA_CMD 0x311
1866
1771/* CT command header -- request/response common fields */ 1867/* CT command header -- request/response common fields */
1772struct ct_cmd_hdr { 1868struct ct_cmd_hdr {
1773 uint8_t revision; 1869 uint8_t revision;
@@ -1825,6 +1921,43 @@ struct ct_sns_req {
1825 uint8_t name_len; 1921 uint8_t name_len;
1826 uint8_t sym_node_name[255]; 1922 uint8_t sym_node_name[255];
1827 } rsnn_nn; 1923 } rsnn_nn;
1924
1925 struct {
1926 uint8_t hba_indentifier[8];
1927 } ghat;
1928
1929 struct {
1930 uint8_t hba_identifier[8];
1931 uint32_t entry_count;
1932 uint8_t port_name[8];
1933 struct ct_fdmi_hba_attributes attrs;
1934 } rhba;
1935
1936 struct {
1937 uint8_t hba_identifier[8];
1938 struct ct_fdmi_hba_attributes attrs;
1939 } rhat;
1940
1941 struct {
1942 uint8_t port_name[8];
1943 struct ct_fdmi_port_attributes attrs;
1944 } rpa;
1945
1946 struct {
1947 uint8_t port_name[8];
1948 } dhba;
1949
1950 struct {
1951 uint8_t port_name[8];
1952 } dhat;
1953
1954 struct {
1955 uint8_t port_name[8];
1956 } dprt;
1957
1958 struct {
1959 uint8_t port_name[8];
1960 } dpa;
1828 } req; 1961 } req;
1829}; 1962};
1830 1963
@@ -1882,6 +2015,12 @@ struct ct_sns_rsp {
1882 struct { 2015 struct {
1883 uint8_t fc4_types[32]; 2016 uint8_t fc4_types[32];
1884 } gft_id; 2017 } gft_id;
2018
2019 struct {
2020 uint32_t entry_count;
2021 uint8_t port_name[8];
2022 struct ct_fdmi_hba_attributes attrs;
2023 } ghat;
1885 } rsp; 2024 } rsp;
1886}; 2025};
1887 2026
@@ -2032,6 +2171,8 @@ struct isp_operations {
2032 uint16_t (*calc_req_entries) (uint16_t); 2171 uint16_t (*calc_req_entries) (uint16_t);
2033 void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); 2172 void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
2034 void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); 2173 void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
2174 void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
2175 uint32_t);
2035 2176
2036 uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *, 2177 uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
2037 uint32_t, uint32_t); 2178 uint32_t, uint32_t);
@@ -2111,6 +2252,7 @@ typedef struct scsi_qla_host {
2111#define IOCTL_ERROR_RECOVERY 23 2252#define IOCTL_ERROR_RECOVERY 23
2112#define LOOP_RESET_NEEDED 24 2253#define LOOP_RESET_NEEDED 24
2113#define BEACON_BLINK_NEEDED 25 2254#define BEACON_BLINK_NEEDED 25
2255#define REGISTER_FDMI_NEEDED 26
2114 2256
2115 uint32_t device_flags; 2257 uint32_t device_flags;
2116#define DFLG_LOCAL_DEVICES BIT_0 2258#define DFLG_LOCAL_DEVICES BIT_0
@@ -2204,6 +2346,7 @@ typedef struct scsi_qla_host {
2204 int port_down_retry_count; 2346 int port_down_retry_count;
2205 uint8_t mbx_count; 2347 uint8_t mbx_count;
2206 uint16_t last_loop_id; 2348 uint16_t last_loop_id;
2349 uint16_t mgmt_svr_loop_id;
2207 2350
2208 uint32_t login_retry_count; 2351 uint32_t login_retry_count;
2209 2352
@@ -2318,6 +2461,7 @@ typedef struct scsi_qla_host {
2318 uint8_t model_number[16+1]; 2461 uint8_t model_number[16+1];
2319#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" 2462#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
2320 char *model_desc; 2463 char *model_desc;
2464 uint8_t adapter_id[16+1];
2321 2465
2322 uint8_t *node_name; 2466 uint8_t *node_name;
2323 uint8_t *port_name; 2467 uint8_t *port_name;
@@ -2377,6 +2521,7 @@ typedef struct scsi_qla_host {
2377#define QLA_SUSPENDED 0x106 2521#define QLA_SUSPENDED 0x106
2378#define QLA_BUSY 0x107 2522#define QLA_BUSY 0x107
2379#define QLA_RSCNS_HANDLED 0x108 2523#define QLA_RSCNS_HANDLED 0x108
2524#define QLA_ALREADY_REGISTERED 0x109
2380 2525
2381/* 2526/*
2382* Stat info for all adapters 2527* Stat info for all adapters
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 665c203e0675..1ed32e7b5472 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -79,6 +79,7 @@ extern int ql2xplogiabsentdevice;
79extern int ql2xenablezio; 79extern int ql2xenablezio;
80extern int ql2xintrdelaytimer; 80extern int ql2xintrdelaytimer;
81extern int ql2xloginretrycount; 81extern int ql2xloginretrycount;
82extern int ql2xfdmienable;
82 83
83extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 84extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
84 85
@@ -147,9 +148,6 @@ qla2x00_abort_target(fc_port_t *);
147#endif 148#endif
148 149
149extern int 150extern int
150qla2x00_target_reset(scsi_qla_host_t *, struct fc_port *);
151
152extern int
153qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 151qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
154 uint8_t *, uint16_t *); 152 uint8_t *, uint16_t *);
155 153
@@ -215,6 +213,9 @@ qla2x00_get_serdes_params(scsi_qla_host_t *, uint16_t *, uint16_t *,
215extern int 213extern int
216qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); 214qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
217 215
216extern int
217qla2x00_stop_firmware(scsi_qla_host_t *);
218
218/* 219/*
219 * Global Function Prototypes in qla_isr.c source file. 220 * Global Function Prototypes in qla_isr.c source file.
220 */ 221 */
@@ -269,6 +270,9 @@ extern int qla2x00_rft_id(scsi_qla_host_t *);
269extern int qla2x00_rff_id(scsi_qla_host_t *); 270extern int qla2x00_rff_id(scsi_qla_host_t *);
270extern int qla2x00_rnn_id(scsi_qla_host_t *); 271extern int qla2x00_rnn_id(scsi_qla_host_t *);
271extern int qla2x00_rsnn_nn(scsi_qla_host_t *); 272extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
273extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
274extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
275extern int qla2x00_fdmi_register(scsi_qla_host_t *);
272 276
273/* 277/*
274 * Global Function Prototypes in qla_rscn.c source file. 278 * Global Function Prototypes in qla_rscn.c source file.
@@ -289,6 +293,8 @@ extern void qla2x00_cancel_io_descriptors(scsi_qla_host_t *);
289/* 293/*
290 * Global Function Prototypes in qla_attr.c source file. 294 * Global Function Prototypes in qla_attr.c source file.
291 */ 295 */
296struct class_device_attribute;
297extern struct class_device_attribute *qla2x00_host_attrs[];
292struct fc_function_template; 298struct fc_function_template;
293extern struct fc_function_template qla2xxx_transport_functions; 299extern struct fc_function_template qla2xxx_transport_functions;
294extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); 300extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 31ce4f62da13..e7b138c2e339 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1099,3 +1099,567 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
1099 1099
1100 return (rval); 1100 return (rval);
1101} 1101}
1102
1103/**
1104 * qla2x00_mgmt_svr_login() - Log in to the fabric Management Service.
1105 * @ha: HA context
1106 *
1107 * Returns 0 on success.
1108 */
1109static int
1110qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
1111{
1112 int ret;
1113 uint16_t mb[MAILBOX_REGISTER_COUNT];
1114
1115 ret = QLA_SUCCESS;
1116 if (ha->flags.management_server_logged_in)
1117 return ret;
1118
1119 ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1120 mb, BIT_1);
1121 if (mb[0] != MBS_COMMAND_COMPLETE) {
1122 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1123 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
1124 __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
1125 mb[2], mb[6], mb[7]));
1126 ret = QLA_FUNCTION_FAILED;
1127 } else
1128 ha->flags.management_server_logged_in = 1;
1129
1130 return ret;
1131}
1132
1133/**
1134 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1135 * @ha: HA context
1136 * @req_size: request size in bytes
1137 * @rsp_size: response size in bytes
1138 *
1139 * Returns a pointer to the @ha's ms_iocb.
1140 */
1141void *
1142qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1143 uint32_t rsp_size)
1144{
1145 ms_iocb_entry_t *ms_pkt;
1146
1147 ms_pkt = ha->ms_iocb;
1148 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1149
1150 ms_pkt->entry_type = MS_IOCB_TYPE;
1151 ms_pkt->entry_count = 1;
1152 SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
1153 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
1154 ms_pkt->timeout = __constant_cpu_to_le16(59);
1155 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1156 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
1157 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1158 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1159
1160 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1161 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1162 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1163
1164 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1165 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1166 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
1167
1168 return ms_pkt;
1169}
1170
1171/**
1172 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1173 * @ha: HA context
1174 * @req_size: request size in bytes
1175 * @rsp_size: response size in bytes
1176 *
1177 * Returns a pointer to the @ha's ms_iocb.
1178 */
1179void *
1180qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1181 uint32_t rsp_size)
1182{
1183 struct ct_entry_24xx *ct_pkt;
1184
1185 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1186 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1187
1188 ct_pkt->entry_type = CT_IOCB_TYPE;
1189 ct_pkt->entry_count = 1;
1190 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
1191 ct_pkt->timeout = __constant_cpu_to_le16(59);
1192 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1193 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
1194 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1195 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1196
1197 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1198 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1199 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1200
1201 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1202 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1203 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1204
1205 return ct_pkt;
1206}
1207
1208static inline ms_iocb_entry_t *
1209qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
1210{
1211 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1212 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1213
1214 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1215 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1216 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1217 } else {
1218 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1219 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1220 }
1221
1222 return ms_pkt;
1223}
1224
1225/**
1226 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1227 * @ct_req: CT request buffer
1228 * @cmd: GS command
1229 * @rsp_size: response size in bytes
1230 *
1231 * Returns a pointer to the initialized @ct_req.
1232 */
1233static inline struct ct_sns_req *
1234qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
1235 uint16_t rsp_size)
1236{
1237 memset(ct_req, 0, sizeof(struct ct_sns_pkt));
1238
1239 ct_req->header.revision = 0x01;
1240 ct_req->header.gs_type = 0xFA;
1241 ct_req->header.gs_subtype = 0x10;
1242 ct_req->command = cpu_to_be16(cmd);
1243 ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1244
1245 return ct_req;
1246}
1247
1248/**
1249 * qla2x00_fdmi_rhba() - Register HBA attributes (FDMI RHBA) with the fabric management server.
1250 * @ha: HA context
1251 *
1252 * Returns 0 on success.
1253 */
1254static int
1255qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1256{
1257 int rval, alen;
1258 uint32_t size, sn;
1259
1260 ms_iocb_entry_t *ms_pkt;
1261 struct ct_sns_req *ct_req;
1262 struct ct_sns_rsp *ct_rsp;
1263 uint8_t *entries;
1264 struct ct_fdmi_hba_attr *eiter;
1265
1266 /* Issue RHBA */
1267 /* Prepare common MS IOCB */
1268 /* Request size adjusted after CT preparation */
1269 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
1270
1271 /* Prepare CT request */
1272 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
1273 RHBA_RSP_SIZE);
1274 ct_rsp = &ha->ct_sns->p.rsp;
1275
1276 /* Prepare FDMI command arguments -- attribute block, attributes. */
1277 memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
1278 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
1279 memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
1280 size = 2 * WWN_SIZE + 4 + 4;
1281
1282 /* Attributes */
1283 ct_req->req.rhba.attrs.count =
1284 __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1285 entries = ct_req->req.rhba.hba_identifier;
1286
1287 /* Nodename. */
1288 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1289 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
1290 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
1291 memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
1292 size += 4 + WWN_SIZE;
1293
1294 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
1295 __func__, ha->host_no,
1296 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
1297 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
1298 eiter->a.node_name[6], eiter->a.node_name[7]));
1299
1300 /* Manufacturer. */
1301 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1302 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
1303 strcpy(eiter->a.manufacturer, "QLogic Corporation");
1304 alen = strlen(eiter->a.manufacturer);
1305 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1306 eiter->len = cpu_to_be16(4 + alen);
1307 size += 4 + alen;
1308
1309 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
1310 eiter->a.manufacturer));
1311
1312 /* Serial number. */
1313 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1314 eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1315 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1316 sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
1317 alen = strlen(eiter->a.serial_num);
1318 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1319 eiter->len = cpu_to_be16(4 + alen);
1320 size += 4 + alen;
1321
1322 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
1323 eiter->a.serial_num));
1324
1325 /* Model name. */
1326 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1327 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
1328 strcpy(eiter->a.model, ha->model_number);
1329 alen = strlen(eiter->a.model);
1330 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1331 eiter->len = cpu_to_be16(4 + alen);
1332 size += 4 + alen;
1333
1334 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
1335 eiter->a.model));
1336
1337 /* Model description. */
1338 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1339 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1340 if (ha->model_desc)
1341 strncpy(eiter->a.model_desc, ha->model_desc, 80);
1342 alen = strlen(eiter->a.model_desc);
1343 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1344 eiter->len = cpu_to_be16(4 + alen);
1345 size += 4 + alen;
1346
1347 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
1348 eiter->a.model_desc));
1349
1350 /* Hardware version. */
1351 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1352 eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1353 strcpy(eiter->a.hw_version, ha->adapter_id);
1354 alen = strlen(eiter->a.hw_version);
1355 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1356 eiter->len = cpu_to_be16(4 + alen);
1357 size += 4 + alen;
1358
1359 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
1360 eiter->a.hw_version));
1361
1362 /* Driver version. */
1363 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1364 eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1365 strcpy(eiter->a.driver_version, qla2x00_version_str);
1366 alen = strlen(eiter->a.driver_version);
1367 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1368 eiter->len = cpu_to_be16(4 + alen);
1369 size += 4 + alen;
1370
1371 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
1372 eiter->a.driver_version));
1373
1374 /* Option ROM version. */
1375 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1376 eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1377 strcpy(eiter->a.orom_version, "0.00");
1378 alen = strlen(eiter->a.orom_version);
1379 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1380 eiter->len = cpu_to_be16(4 + alen);
1381 size += 4 + alen;
1382
1383 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
1384 eiter->a.orom_version));
1385
1386 /* Firmware version */
1387 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1388 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1389 ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
1390 alen = strlen(eiter->a.fw_version);
1391 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1392 eiter->len = cpu_to_be16(4 + alen);
1393 size += 4 + alen;
1394
1395 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
1396 eiter->a.fw_version));
1397
1398 /* Update MS request size. */
1399 qla2x00_update_ms_fdmi_iocb(ha, size + 16);
1400
1401 DEBUG13(printk("%s(%ld): RHBA identifier="
1402 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1403 ha->host_no, ct_req->req.rhba.hba_identifier[0],
1404 ct_req->req.rhba.hba_identifier[1],
1405 ct_req->req.rhba.hba_identifier[2],
1406 ct_req->req.rhba.hba_identifier[3],
1407 ct_req->req.rhba.hba_identifier[4],
1408 ct_req->req.rhba.hba_identifier[5],
1409 ct_req->req.rhba.hba_identifier[6],
1410 ct_req->req.rhba.hba_identifier[7], size));
1411 DEBUG13(qla2x00_dump_buffer(entries, size));
1412
1413 /* Execute MS IOCB */
1414 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1415 sizeof(ms_iocb_entry_t));
1416 if (rval != QLA_SUCCESS) {
1417 /*EMPTY*/
1418 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
1419 ha->host_no, rval));
1420 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
1421 QLA_SUCCESS) {
1422 rval = QLA_FUNCTION_FAILED;
1423 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1424 ct_rsp->header.explanation_code ==
1425 CT_EXPL_ALREADY_REGISTERED) {
1426 DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
1427 __func__, ha->host_no));
1428 rval = QLA_ALREADY_REGISTERED;
1429 }
1430 } else {
1431 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
1432 ha->host_no));
1433 }
1434
1435 return rval;
1436}
1437
1438/**
1439 * qla2x00_fdmi_dhba() - Deregister HBA attributes (FDMI DHBA) from the fabric management server.
1440 * @ha: HA context
1441 *
1442 * Returns 0 on success.
1443 */
1444static int
1445qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1446{
1447 int rval;
1448
1449 ms_iocb_entry_t *ms_pkt;
1450 struct ct_sns_req *ct_req;
1451 struct ct_sns_rsp *ct_rsp;
1452
1453	/* Issue DHBA */
1454 /* Prepare common MS IOCB */
1455 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
1456 DHBA_RSP_SIZE);
1457
1458 /* Prepare CT request */
1459 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, DHBA_CMD,
1460 DHBA_RSP_SIZE);
1461 ct_rsp = &ha->ct_sns->p.rsp;
1462
1463 /* Prepare FDMI command arguments -- portname. */
1464 memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
1465
1466 DEBUG13(printk("%s(%ld): DHBA portname="
1467 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
1468 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1469 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1470 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1471 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
1472
1473 /* Execute MS IOCB */
1474 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1475 sizeof(ms_iocb_entry_t));
1476 if (rval != QLA_SUCCESS) {
1477 /*EMPTY*/
1478 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
1479 ha->host_no, rval));
1480 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
1481 QLA_SUCCESS) {
1482 rval = QLA_FUNCTION_FAILED;
1483 } else {
1484 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
1485 ha->host_no));
1486 }
1487
1488 return rval;
1489}
1490
1491/**
1492 * qla2x00_fdmi_rpa() - Register port attributes (FDMI RPA) with the fabric management server.
1493 * @ha: HA context
1494 *
1495 * Returns 0 on success.
1496 */
1497static int
1498qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1499{
1500 int rval, alen;
1501 uint32_t size, max_frame_size;
1502
1503 ms_iocb_entry_t *ms_pkt;
1504 struct ct_sns_req *ct_req;
1505 struct ct_sns_rsp *ct_rsp;
1506 uint8_t *entries;
1507 struct ct_fdmi_port_attr *eiter;
1508 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1509
1510 /* Issue RPA */
1511 /* Prepare common MS IOCB */
1512 /* Request size adjusted after CT preparation */
1513 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
1514
1515 /* Prepare CT request */
1516 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
1517 RPA_RSP_SIZE);
1518 ct_rsp = &ha->ct_sns->p.rsp;
1519
1520 /* Prepare FDMI command arguments -- attribute block, attributes. */
1521 memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
1522 size = WWN_SIZE + 4;
1523
1524 /* Attributes */
1525 ct_req->req.rpa.attrs.count =
1526 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1527 entries = ct_req->req.rpa.port_name;
1528
1529 /* FC4 types. */
1530 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1531 eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
1532 eiter->len = __constant_cpu_to_be16(4 + 32);
1533 eiter->a.fc4_types[2] = 0x01;
1534 size += 4 + 32;
1535
1536 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
1537 eiter->a.fc4_types[2], eiter->a.fc4_types[1]));
1538
1539 /* Supported speed. */
1540 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1541 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1542 eiter->len = __constant_cpu_to_be16(4 + 4);
1543 if (IS_QLA25XX(ha))
1544 eiter->a.sup_speed = __constant_cpu_to_be32(4);
1545 else if (IS_QLA24XX(ha))
1546 eiter->a.sup_speed = __constant_cpu_to_be32(8);
1547 else if (IS_QLA23XX(ha))
1548 eiter->a.sup_speed = __constant_cpu_to_be32(2);
1549 else
1550 eiter->a.sup_speed = __constant_cpu_to_be32(1);
1551 size += 4 + 4;
1552
1553 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
1554 eiter->a.sup_speed));
1555
1556 /* Current speed. */
1557 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1558 eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1559 eiter->len = __constant_cpu_to_be16(4 + 4);
1560 switch (ha->link_data_rate) {
1561 case 0:
1562 eiter->a.cur_speed = __constant_cpu_to_be32(1);
1563 break;
1564 case 1:
1565 eiter->a.cur_speed = __constant_cpu_to_be32(2);
1566 break;
1567 case 3:
1568 eiter->a.cur_speed = __constant_cpu_to_be32(8);
1569 break;
1570 case 4:
1571 eiter->a.cur_speed = __constant_cpu_to_be32(4);
1572 break;
1573 }
1574 size += 4 + 4;
1575
1576 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
1577 eiter->a.cur_speed));
1578
1579 /* Max frame size. */
1580 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1581 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1582 eiter->len = __constant_cpu_to_be16(4 + 4);
1583 max_frame_size = IS_QLA24XX(ha) || IS_QLA25XX(ha) ?
1584 (uint32_t) icb24->frame_payload_size:
1585 (uint32_t) ha->init_cb->frame_payload_size;
1586 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1587 size += 4 + 4;
1588
1589 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
1590 eiter->a.max_frame_size));
1591
1592 /* OS device name. */
1593 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1594 eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1595 sprintf(eiter->a.os_dev_name, "/proc/scsi/qla2xxx/%ld", ha->host_no);
1596 alen = strlen(eiter->a.os_dev_name);
1597 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1598 eiter->len = cpu_to_be16(4 + alen);
1599 size += 4 + alen;
1600
1601 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
1602 eiter->a.os_dev_name));
1603
1604 /* Update MS request size. */
1605 qla2x00_update_ms_fdmi_iocb(ha, size + 16);
1606
1607 DEBUG13(printk("%s(%ld): RPA portname="
1608 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1609 ha->host_no, ct_req->req.rpa.port_name[0],
1610 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
1611 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
1612 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
1613 ct_req->req.rpa.port_name[7], size));
1614 DEBUG13(qla2x00_dump_buffer(entries, size));
1615
1616 /* Execute MS IOCB */
1617 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1618 sizeof(ms_iocb_entry_t));
1619 if (rval != QLA_SUCCESS) {
1620 /*EMPTY*/
1621 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
1622 ha->host_no, rval));
1623 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
1624 QLA_SUCCESS) {
1625 rval = QLA_FUNCTION_FAILED;
1626 } else {
1627 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
1628 ha->host_no));
1629 }
1630
1631 return rval;
1632}
1633
1634/**
1635 * qla2x00_fdmi_register() - Perform FDMI registration (RHBA, then RPA) with the fabric.
1636 * @ha: HA context
1637 *
1638 * Returns 0 on success.
1639 */
1640int
1641qla2x00_fdmi_register(scsi_qla_host_t *ha)
1642{
1643 int rval;
1644
1645 rval = qla2x00_mgmt_svr_login(ha);
1646 if (rval)
1647 return rval;
1648
1649 rval = qla2x00_fdmi_rhba(ha);
1650 if (rval) {
1651 if (rval != QLA_ALREADY_REGISTERED)
1652 return rval;
1653
1654 rval = qla2x00_fdmi_dhba(ha);
1655 if (rval)
1656 return rval;
1657
1658 rval = qla2x00_fdmi_rhba(ha);
1659 if (rval)
1660 return rval;
1661 }
1662 rval = qla2x00_fdmi_rpa(ha);
1663
1664 return rval;
1665}
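
Each RHBA/RPA attribute built above pads its payload out to a four-byte boundary, and a string whose length is already a multiple of four still gets four extra pad bytes (presumably so a terminating NUL always fits). A standalone sketch of that rounding, assuming it mirrors the alen arithmetic in qla2x00_fdmi_rhba()/qla2x00_fdmi_rpa(); the helper is illustrative only:

#include <assert.h>

/* Illustrative only: round a strlen() result the way the FDMI attribute
 * builders do -- up to the next multiple of four, always adding at least
 * one pad byte past an exact multiple. */
static int fdmi_pad_len(int alen)
{
	return alen + ((alen & 3) ? (4 - (alen & 3)) : 4);
}

int main(void)
{
	assert(fdmi_pad_len(5) == 8);	/* 5-byte string -> 8 bytes      */
	assert(fdmi_pad_len(8) == 12);	/* already aligned -> +4 anyway  */
	return 0;
}
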
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a6d2559217cd..c619583e646b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -88,6 +88,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
88 ha->mbx_flags = 0; 88 ha->mbx_flags = 0;
89 ha->isp_abort_cnt = 0; 89 ha->isp_abort_cnt = 0;
90 ha->beacon_blink_led = 0; 90 ha->beacon_blink_led = 0;
91 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
91 92
92 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 93 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
93 rval = ha->isp_ops.pci_config(ha); 94 rval = ha->isp_ops.pci_config(ha);
@@ -1563,7 +1564,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1563 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); 1564 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
1564 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); 1565 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
1565 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); 1566 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
1566 ha->flags.enable_led_scheme = ((nv->efi_parameters & BIT_3) ? 1 : 0); 1567 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
1567 1568
1568 ha->operating_mode = 1569 ha->operating_mode =
1569 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; 1570 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -1697,6 +1698,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
1697 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX; 1698 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
1698 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1699 atomic_set(&fcport->state, FCS_UNCONFIGURED);
1699 fcport->flags = FCF_RLC_SUPPORT; 1700 fcport->flags = FCF_RLC_SUPPORT;
1701 fcport->supported_classes = FC_COS_UNSPECIFIED;
1700 1702
1701 return (fcport); 1703 return (fcport);
1702} 1704}
@@ -1898,7 +1900,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
1898 continue; 1900 continue;
1899 1901
1900 /* Bypass if not same domain and area of adapter. */ 1902 /* Bypass if not same domain and area of adapter. */
1901 if (area != ha->d_id.b.area || domain != ha->d_id.b.domain) 1903 if (area && domain &&
1904 (area != ha->d_id.b.area || domain != ha->d_id.b.domain))
1902 continue; 1905 continue;
1903 1906
1904 /* Bypass invalid local loop ID. */ 1907 /* Bypass invalid local loop ID. */
@@ -2075,6 +2078,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2075 return; 2078 return;
2076 } 2079 }
2077 rport->dd_data = fcport; 2080 rport->dd_data = fcport;
2081 rport->supported_classes = fcport->supported_classes;
2078 2082
2079 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2083 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2080 if (fcport->port_type == FCT_INITIATOR) 2084 if (fcport->port_type == FCT_INITIATOR)
@@ -2130,6 +2134,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2130 return (QLA_SUCCESS); 2134 return (QLA_SUCCESS);
2131 } 2135 }
2132 do { 2136 do {
2137 /* FDMI support. */
2138 if (ql2xfdmienable &&
2139 test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags))
2140 qla2x00_fdmi_register(ha);
2141
2133 /* Ensure we are logged into the SNS. */ 2142 /* Ensure we are logged into the SNS. */
2134 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) 2143 if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
2135 loop_id = NPH_SNS; 2144 loop_id = NPH_SNS;
@@ -2392,6 +2401,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2392 if (new_fcport->d_id.b24 == ha->d_id.b24) 2401 if (new_fcport->d_id.b24 == ha->d_id.b24)
2393 continue; 2402 continue;
2394 2403
2404 /* Bypass if same domain and area of adapter. */
2405 if (((new_fcport->d_id.b24 & 0xffff00) ==
2406 (ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2407 ISP_CFG_FL)
2408 continue;
2409
2395 /* Bypass reserved domain fields. */ 2410 /* Bypass reserved domain fields. */
2396 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 2411 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
2397 continue; 2412 continue;
@@ -2794,6 +2809,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
2794 } 2809 }
2795 } 2810 }
2796 2811
2812 if (mb[10] & BIT_0)
2813 fcport->supported_classes |= FC_COS_CLASS2;
2814 if (mb[10] & BIT_1)
2815 fcport->supported_classes |= FC_COS_CLASS3;
2816
2797 rval = QLA_SUCCESS; 2817 rval = QLA_SUCCESS;
2798 break; 2818 break;
2799 } else if (mb[0] == MBS_LOOP_ID_USED) { 2819 } else if (mb[0] == MBS_LOOP_ID_USED) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ebdc3c54d155..37f82e2cd7fb 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -810,12 +810,8 @@ qla24xx_start_scsi(srb_t *sp)
810 ha->req_q_cnt = ha->request_q_length - 810 ha->req_q_cnt = ha->request_q_length -
811 (ha->req_ring_index - cnt); 811 (ha->req_ring_index - cnt);
812 } 812 }
813 if (ha->req_q_cnt < (req_cnt + 2)) { 813 if (ha->req_q_cnt < (req_cnt + 2))
814 if (cmd->use_sg)
815 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
816 cmd->sc_data_direction);
817 goto queuing_error; 814 goto queuing_error;
818 }
819 815
820 /* Build command packet. */ 816 /* Build command packet. */
821 ha->current_outstanding_cmd = handle; 817 ha->current_outstanding_cmd = handle;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f910de6dd437..c255bb0268a9 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -451,6 +451,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
451 451
452 ha->flags.management_server_logged_in = 0; 452 ha->flags.management_server_logged_in = 0;
453 ha->link_data_rate = 0; 453 ha->link_data_rate = 0;
454 if (ql2xfdmienable)
455 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
454 456
455 /* Update AEN queue. */ 457 /* Update AEN queue. */
456 qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL); 458 qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 409ea0ac4032..13e1c9047079 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -19,6 +19,7 @@
19#include "qla_def.h" 19#include "qla_def.h"
20 20
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <scsi/scsi_transport_fc.h>
22 23
23static void 24static void
24qla2x00_mbx_sem_timeout(unsigned long data) 25qla2x00_mbx_sem_timeout(unsigned long data)
@@ -251,7 +252,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
251 mb0 = RD_REG_WORD(&reg->isp24.mailbox0); 252 mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
252 ictrl = RD_REG_DWORD(&reg->isp24.ictrl); 253 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
253 } else { 254 } else {
254 mb0 = RD_MAILBOX_REG(ha, reg->isp, 0); 255 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
255 ictrl = RD_REG_WORD(&reg->isp.ictrl); 256 ictrl = RD_REG_WORD(&reg->isp.ictrl);
256 } 257 }
257 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 258 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
@@ -983,58 +984,6 @@ qla2x00_abort_target(fc_port_t *fcport)
983#endif 984#endif
984 985
985/* 986/*
986 * qla2x00_target_reset
987 * Issue target reset mailbox command.
988 *
989 * Input:
990 * ha = adapter block pointer.
991 * TARGET_QUEUE_LOCK must be released.
992 * ADAPTER_STATE_LOCK must be released.
993 *
994 * Returns:
995 * qla2x00 local function return status code.
996 *
997 * Context:
998 * Kernel context.
999 */
1000int
1001qla2x00_target_reset(scsi_qla_host_t *ha, struct fc_port *fcport)
1002{
1003 int rval;
1004 mbx_cmd_t mc;
1005 mbx_cmd_t *mcp = &mc;
1006
1007 DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);)
1008
1009 if (atomic_read(&fcport->state) != FCS_ONLINE)
1010 return 0;
1011
1012 mcp->mb[0] = MBC_TARGET_RESET;
1013 if (HAS_EXTENDED_IDS(ha))
1014 mcp->mb[1] = fcport->loop_id;
1015 else
1016 mcp->mb[1] = fcport->loop_id << 8;
1017 mcp->mb[2] = ha->loop_reset_delay;
1018 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1019 mcp->in_mb = MBX_0;
1020 mcp->tov = 30;
1021 mcp->flags = 0;
1022 rval = qla2x00_mailbox_command(ha, mcp);
1023
1024 if (rval != QLA_SUCCESS) {
1025 /*EMPTY*/
1026 DEBUG2_3_11(printk("qla2x00_target_reset(%ld): failed=%x.\n",
1027 ha->host_no, rval);)
1028 } else {
1029 /*EMPTY*/
1030 DEBUG11(printk("qla2x00_target_reset(%ld): done.\n",
1031 ha->host_no);)
1032 }
1033
1034 return rval;
1035}
1036
1037/*
1038 * qla2x00_get_adapter_id 987 * qla2x00_get_adapter_id
1039 * Get adapter ID and topology. 988 * Get adapter ID and topology.
1040 * 989 *
@@ -1326,6 +1275,10 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1326 fcport->port_type = FCT_INITIATOR; 1275 fcport->port_type = FCT_INITIATOR;
1327 else 1276 else
1328 fcport->port_type = FCT_TARGET; 1277 fcport->port_type = FCT_TARGET;
1278
1279 /* Passback COS information. */
1280 fcport->supported_classes = (pd->options & BIT_4) ?
1281 FC_COS_CLASS2: FC_COS_CLASS3;
1329 } 1282 }
1330 1283
1331gpd_error_out: 1284gpd_error_out:
@@ -1661,6 +1614,13 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1661 mb[1] |= BIT_1; 1614 mb[1] |= BIT_1;
1662 } else 1615 } else
1663 mb[1] = BIT_0; 1616 mb[1] = BIT_0;
1617
1618 /* Passback COS information. */
1619 mb[10] = 0;
1620 if (lg->io_parameter[7] || lg->io_parameter[8])
1621 mb[10] |= BIT_0; /* Class 2. */
1622 if (lg->io_parameter[9] || lg->io_parameter[10])
1623 mb[10] |= BIT_1; /* Class 3. */
1664 } 1624 }
1665 1625
1666 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1626 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1723,6 +1683,8 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1723 mb[2] = mcp->mb[2]; 1683 mb[2] = mcp->mb[2];
1724 mb[6] = mcp->mb[6]; 1684 mb[6] = mcp->mb[6];
1725 mb[7] = mcp->mb[7]; 1685 mb[7] = mcp->mb[7];
1686 /* COS retrieved from Get-Port-Database mailbox command. */
1687 mb[10] = 0;
1726 } 1688 }
1727 1689
1728 if (rval != QLA_SUCCESS) { 1690 if (rval != QLA_SUCCESS) {
@@ -2465,3 +2427,32 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
2465 2427
2466 return rval; 2428 return rval;
2467} 2429}
2430
2431int
2432qla2x00_stop_firmware(scsi_qla_host_t *ha)
2433{
2434 int rval;
2435 mbx_cmd_t mc;
2436 mbx_cmd_t *mcp = &mc;
2437
2438 if (!IS_QLA24XX(ha) && !IS_QLA25XX(ha))
2439 return QLA_FUNCTION_FAILED;
2440
2441 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2442
2443 mcp->mb[0] = MBC_STOP_FIRMWARE;
2444 mcp->out_mb = MBX_0;
2445 mcp->in_mb = MBX_0;
2446 mcp->tov = 5;
2447 mcp->flags = 0;
2448 rval = qla2x00_mailbox_command(ha, mcp);
2449
2450 if (rval != QLA_SUCCESS) {
2451 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2452 ha->host_no, rval));
2453 } else {
2454 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2455 }
2456
2457 return rval;
2458}
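
qla2x00_stop_firmware() above wraps the new MBC_STOP_FIRMWARE (0x14) mailbox command with a 5-second timeout and returns QLA_FUNCTION_FAILED outright on anything other than ISP24xx/ISP25xx. A hypothetical caller, shown only to illustrate shutdown-time usage; the surrounding helper is not part of this patch:

#include "qla_def.h"

/* Hypothetical teardown helper (illustration only): ask an ISP24xx/25xx
 * to stop its firmware; on older ISPs, or on mailbox failure, the wrapper
 * reports failure and we simply log it. */
static void qla2xxx_quiesce_fw(scsi_qla_host_t *ha)
{
	if (qla2x00_stop_firmware(ha) != QLA_SUCCESS)
		qla_printk(KERN_INFO, ha, "Firmware not stopped.\n");
}
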
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9000659bfbcf..8982978c42fd 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -79,7 +79,7 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xloginretrycount,
 		"Specify an alternate value for the NVRAM login retry count.");
 
-int ql2xfwloadbin;
+int ql2xfwloadbin=1;
 module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xfwloadbin,
 		"Load ISP2xxx firmware image via hotplug.");
@@ -88,6 +88,12 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 
 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
 
+int ql2xfdmienable;
+module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xfdmienable,
+		"Enables FDMI registrations "
+		"Default is 0 - no FDMI. 1 - perform FDMI.");
+
 /*
  * SCSI host template entry points
  */
@@ -105,6 +111,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 static int qla2x00_loop_reset(scsi_qla_host_t *ha);
 static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
 
+static int qla2x00_change_queue_depth(struct scsi_device *, int);
+static int qla2x00_change_queue_type(struct scsi_device *, int);
+
 static struct scsi_host_template qla2x00_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= "qla2xxx",
@@ -119,6 +128,8 @@ static struct scsi_host_template qla2x00_driver_template = {
 
 	.slave_alloc		= qla2xxx_slave_alloc,
 	.slave_destroy		= qla2xxx_slave_destroy,
+	.change_queue_depth	= qla2x00_change_queue_depth,
+	.change_queue_type	= qla2x00_change_queue_type,
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
@@ -129,6 +140,7 @@ static struct scsi_host_template qla2x00_driver_template = {
 	 * which equates to 0x800000 sectors.
 	 */
 	.max_sectors		= 0xFFFF,
+	.shost_attrs		= qla2x00_host_attrs,
 };
 
 static struct scsi_host_template qla24xx_driver_template = {
@@ -145,12 +157,15 @@ static struct scsi_host_template qla24xx_driver_template = {
 
 	.slave_alloc		= qla2xxx_slave_alloc,
 	.slave_destroy		= qla2xxx_slave_destroy,
+	.change_queue_depth	= qla2x00_change_queue_depth,
+	.change_queue_type	= qla2x00_change_queue_type,
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= SG_ALL,
 
 	.max_sectors		= 0xFFFF,
+	.shost_attrs		= qla2x00_host_attrs,
 };
 
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -487,14 +502,13 @@ qc24_fail_command:
 static int
 qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
 {
-#define ABORT_POLLING_PERIOD	HZ
-#define ABORT_WAIT_ITER		((10 * HZ) / (ABORT_POLLING_PERIOD))
+#define ABORT_POLLING_PERIOD	1000
+#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
 	unsigned long wait_iter = ABORT_WAIT_ITER;
 	int ret = QLA_SUCCESS;
 
 	while (CMD_SP(cmd)) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(ABORT_POLLING_PERIOD);
+		msleep(ABORT_POLLING_PERIOD);
 
 		if (--wait_iter)
 			break;
@@ -1016,7 +1030,7 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
 		if (fcport->port_type != FCT_TARGET)
 			continue;
 
-		status = qla2x00_target_reset(ha, fcport);
+		status = qla2x00_device_reset(ha, fcport);
 		if (status != QLA_SUCCESS)
 			break;
 	}
@@ -1103,6 +1117,28 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
 	sdev->hostdata = NULL;
 }
 
+static int
+qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	return sdev->queue_depth;
+}
+
+static int
+qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+	if (sdev->tagged_supported) {
+		scsi_set_tag_type(sdev, tag_type);
+		if (tag_type)
+			scsi_activate_tcq(sdev, sdev->queue_depth);
+		else
+			scsi_deactivate_tcq(sdev, sdev->queue_depth);
+	} else
+		tag_type = 0;
+
+	return tag_type;
+}
+
 /**
  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
  * @ha: HA context
@@ -1113,36 +1149,23 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
 static void
 qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
 {
-	/* Assume 32bit DMA address */
+	/* Assume a 32bit DMA mask. */
 	ha->flags.enable_64bit_addressing = 0;
 
-	/*
-	 * Given the two variants pci_set_dma_mask(), allow the compiler to
-	 * assist in setting the proper dma mask.
-	 */
-	if (sizeof(dma_addr_t) > 4) {
-		if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
+	if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
+		/* Any upper-dword bits set? */
+		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
+		    !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
+			/* Ok, a 64bit DMA mask is applicable. */
 			ha->flags.enable_64bit_addressing = 1;
 			ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
 			ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
-
+			return;
-			if (pci_set_consistent_dma_mask(ha->pdev,
-			    DMA_64BIT_MASK)) {
-				qla_printk(KERN_DEBUG, ha,
-				    "Failed to set 64 bit PCI consistent mask; "
-				    "using 32 bit.\n");
-				pci_set_consistent_dma_mask(ha->pdev,
-				    DMA_32BIT_MASK);
-			}
-		} else {
-			qla_printk(KERN_DEBUG, ha,
-			    "Failed to set 64 bit PCI DMA mask, falling back "
-			    "to 32 bit MASK.\n");
-			pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
 		}
-	} else {
-		pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
 	}
+
+	dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK);
+	pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
 }
 
 static int
@@ -1316,6 +1339,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 	ha->prev_topology = 0;
 	ha->ports = MAX_BUSES;
 	ha->init_cb_size = sizeof(init_cb_t);
+	ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
 
 	/* Assign ISP specific operations. */
 	ha->isp_ops.pci_config = qla2100_pci_config;
@@ -1338,6 +1362,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 	ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
 	ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
 	ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
+	ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
 	ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
 	ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
 	ha->isp_ops.fw_dump = qla2100_fw_dump;
@@ -1375,6 +1400,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 		ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
 		ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
 		ha->init_cb_size = sizeof(struct init_cb_24xx);
+		ha->mgmt_svr_loop_id = 10;
 		ha->isp_ops.pci_config = qla24xx_pci_config;
 		ha->isp_ops.reset_chip = qla24xx_reset_chip;
 		ha->isp_ops.chip_diag = qla24xx_chip_diag;
@@ -1395,6 +1421,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 		ha->isp_ops.fabric_login = qla24xx_login_fabric;
 		ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
 		ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
+		ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
 		ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
 		ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
 		ha->isp_ops.fw_dump = qla24xx_fw_dump;
@@ -1558,8 +1585,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 	return 0;
 
 probe_failed:
-	fc_remove_host(ha->host);
-
 	qla2x00_free_device(ha);
 
 	scsi_host_put(host);
@@ -1601,10 +1626,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
 		qla2x00_cancel_io_descriptors(ha);
 
-	/* turn-off interrupts on the card */
-	if (ha->interrupts_on)
-		ha->isp_ops.disable_intrs(ha);
-
 	/* Disable timer */
 	if (ha->timer_active)
 		qla2x00_stop_timer(ha);
@@ -1624,8 +1645,14 @@ qla2x00_free_device(scsi_qla_host_t *ha)
 		}
 	}
 
-	qla2x00_mem_free(ha);
+	/* Stop currently executing firmware. */
+	qla2x00_stop_firmware(ha);
+
+	/* turn-off interrupts on the card */
+	if (ha->interrupts_on)
+		ha->isp_ops.disable_intrs(ha);
 
+	qla2x00_mem_free(ha);
 
 	ha->flags.online = 0;
 
@@ -1934,7 +1961,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
 {
 	struct list_head *fcpl, *fcptemp;
 	fc_port_t *fcport;
-	unsigned long wtime;/* max wait time if mbx cmd is busy. */
+	unsigned int wtime;/* max wait time if mbx cmd is busy. */
 
 	if (ha == NULL) {
 		/* error */
@@ -1943,11 +1970,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
 	}
 
 	/* Make sure all other threads are stopped. */
-	wtime = 60 * HZ;
-	while (ha->dpc_wait && wtime) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		wtime = schedule_timeout(wtime);
-	}
+	wtime = 60 * 1000;
+	while (ha->dpc_wait && wtime)
+		wtime = msleep_interruptible(wtime);
 
 	/* free ioctl memory */
 	qla2x00_free_ioctl_mem(ha);
@@ -2478,15 +2503,15 @@ qla2x00_timer(scsi_qla_host_t *ha)
 int
 qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
 {
-	const unsigned int step = HZ/10;
+	const unsigned int step = 100; /* msecs */
+	unsigned int iterations = jiffies_to_msecs(timeout)/100;
 
 	do {
 		if (!down_trylock(sema))
 			return 0;
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (schedule_timeout(step))
+		if (msleep_interruptible(step))
 			break;
-	} while ((timeout -= step) > 0);
+	} while (--iterations >= 0);
 
 	return -ETIMEDOUT;
 }
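
Most of the qla_os.c hunks above replace HZ-based schedule_timeout() arithmetic with millisecond sleeps (msleep(), msleep_interruptible()), so the polling intervals no longer depend on the kernel's HZ setting. A stripped-down sketch of the same polling pattern, with a hypothetical helper name and condition:

    /* Poll a completion flag every 100 ms, for at most timeout_ms milliseconds. */
    static int wait_for_flag(volatile int *flag, unsigned int timeout_ms)
    {
    	const unsigned int step = 100;			/* msecs per poll */
    	unsigned int iterations = timeout_ms / step;

    	do {
    		if (*flag)
    			return 0;
    		if (msleep_interruptible(step))		/* non-zero: signal received */
    			break;
    	} while (iterations--);

    	return -ETIMEDOUT;
    }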
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index d7f5c608009c..c14abf743b7c 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -468,21 +468,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
 	uint32_t i;
-	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-
-	/* Pause RISC. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
 
 	/* Dword reads to flash. */
 	for (i = 0; i < dwords; i++, faddr++)
 		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
 		    flash_data_to_access_addr(faddr)));
 
-	/* Release RISC pause. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
-
 	return dwptr;
 }
 
@@ -532,10 +523,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 
 	ret = QLA_SUCCESS;
 
-	/* Pause RISC. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
-
 	qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
 	DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__,
 	    ha->host_no, man_id, flash_id));
@@ -599,10 +586,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
 	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
 
-	/* Release RISC pause. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
-
 	return ret;
 }
 
@@ -630,11 +613,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 {
 	uint32_t i;
 	uint32_t *dwptr;
-	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-
-	/* Pause RISC. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
 
 	/* Dword reads to flash. */
 	dwptr = (uint32_t *)buf;
@@ -642,10 +620,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
 		    nvram_data_to_access_addr(naddr)));
 
-	/* Release RISC pause. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
-
 	return buf;
 }
 
@@ -690,10 +664,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 
 	ret = QLA_SUCCESS;
 
-	/* Pause RISC. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
-
 	/* Enable flash write. */
 	WRT_REG_DWORD(&reg->ctrl_status,
 	    RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -728,9 +698,5 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
 	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
 
-	/* Release RISC pause. */
-	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
-
 	return ret;
 }
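
The qla_sup.c hunks only drop the RISC pause/release bracketing; the read-backs of ctrl_status that flush posted PCI writes are kept. That read-back idiom, written against the generic MMIO accessors rather than the driver's WRT_REG/RD_REG macros, looks roughly like this:

    /* Write a device register, then read it back so the write cannot
     * linger in a PCI posting buffer. */
    static inline void writel_flushed(u32 val, void __iomem *reg)
    {
    	writel(val, reg);
    	(void)readl(reg);	/* flush posted write */
    }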
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e3cd3618bc54..eae7d6edd531 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -19,9 +19,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.01.00b5-k"
+#define QLA2XXX_VERSION      "8.01.00-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	1
 #define QLA_DRIVER_PATCH_VER	0
-#define QLA_DRIVER_BETA_VER	5
+#define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
new file mode 100644
index 000000000000..f1ea5027865f
--- /dev/null
+++ b/drivers/scsi/raid_class.c
@@ -0,0 +1,250 @@
1/*
2 * RAID Attributes
3 */
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/list.h>
7#include <linux/raid_class.h>
8#include <scsi/scsi_device.h>
9#include <scsi/scsi_host.h>
10
11#define RAID_NUM_ATTRS 3
12
13struct raid_internal {
14 struct raid_template r;
15 struct raid_function_template *f;
16 /* The actual attributes */
17 struct class_device_attribute private_attrs[RAID_NUM_ATTRS];
18 /* The array of null terminated pointers to attributes
19 * needed by scsi_sysfs.c */
20 struct class_device_attribute *attrs[RAID_NUM_ATTRS + 1];
21};
22
23struct raid_component {
24 struct list_head node;
25 struct device *dev;
26 int num;
27};
28
29#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r)
30
31#define tc_to_raid_internal(tcont) ({ \
32 struct raid_template *r = \
33 container_of(tcont, struct raid_template, raid_attrs); \
34 to_raid_internal(r); \
35})
36
37#define ac_to_raid_internal(acont) ({ \
38 struct transport_container *tc = \
39 container_of(acont, struct transport_container, ac); \
40 tc_to_raid_internal(tc); \
41})
42
43#define class_device_to_raid_internal(cdev) ({ \
44 struct attribute_container *ac = \
45 attribute_container_classdev_to_container(cdev); \
46 ac_to_raid_internal(ac); \
47})
48
49
50static int raid_match(struct attribute_container *cont, struct device *dev)
51{
52 /* We have to look for every subsystem that could house
53 * emulated RAID devices, so start with SCSI */
54 struct raid_internal *i = ac_to_raid_internal(cont);
55
56 if (scsi_is_sdev_device(dev)) {
57 struct scsi_device *sdev = to_scsi_device(dev);
58
59 if (i->f->cookie != sdev->host->hostt)
60 return 0;
61
62 return i->f->is_raid(dev);
63 }
64 /* FIXME: look at other subsystems too */
65 return 0;
66}
67
68static int raid_setup(struct transport_container *tc, struct device *dev,
69 struct class_device *cdev)
70{
71 struct raid_data *rd;
72
73 BUG_ON(class_get_devdata(cdev));
74
75 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
76 if (!rd)
77 return -ENOMEM;
78
79 memset(rd, 0, sizeof(*rd));
80 INIT_LIST_HEAD(&rd->component_list);
81 class_set_devdata(cdev, rd);
82
83 return 0;
84}
85
86static int raid_remove(struct transport_container *tc, struct device *dev,
87 struct class_device *cdev)
88{
89 struct raid_data *rd = class_get_devdata(cdev);
90 struct raid_component *rc, *next;
91 class_set_devdata(cdev, NULL);
92 list_for_each_entry_safe(rc, next, &rd->component_list, node) {
93 char buf[40];
94 snprintf(buf, sizeof(buf), "component-%d", rc->num);
95 list_del(&rc->node);
96 sysfs_remove_link(&cdev->kobj, buf);
97 kfree(rc);
98 }
99 kfree(class_get_devdata(cdev));
100 return 0;
101}
102
103static DECLARE_TRANSPORT_CLASS(raid_class,
104 "raid_devices",
105 raid_setup,
106 raid_remove,
107 NULL);
108
109static struct {
110 enum raid_state value;
111 char *name;
112} raid_states[] = {
113 { RAID_ACTIVE, "active" },
114 { RAID_DEGRADED, "degraded" },
115 { RAID_RESYNCING, "resyncing" },
116 { RAID_OFFLINE, "offline" },
117};
118
119static const char *raid_state_name(enum raid_state state)
120{
121 int i;
122 char *name = NULL;
123
124 for (i = 0; i < sizeof(raid_states)/sizeof(raid_states[0]); i++) {
125 if (raid_states[i].value == state) {
126 name = raid_states[i].name;
127 break;
128 }
129 }
130 return name;
131}
132
133
134#define raid_attr_show_internal(attr, fmt, var, code) \
135static ssize_t raid_show_##attr(struct class_device *cdev, char *buf) \
136{ \
137 struct raid_data *rd = class_get_devdata(cdev); \
138 code \
139 return snprintf(buf, 20, #fmt "\n", var); \
140}
141
142#define raid_attr_ro_states(attr, states, code) \
143raid_attr_show_internal(attr, %s, name, \
144 const char *name; \
145 code \
146 name = raid_##states##_name(rd->attr); \
147) \
148static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
149
150
151#define raid_attr_ro_internal(attr, code) \
152raid_attr_show_internal(attr, %d, rd->attr, code) \
153static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
154
155#define ATTR_CODE(attr) \
156 struct raid_internal *i = class_device_to_raid_internal(cdev); \
157 if (i->f->get_##attr) \
158 i->f->get_##attr(cdev->dev);
159
160#define raid_attr_ro(attr) raid_attr_ro_internal(attr, )
161#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr))
162#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr))
163
164raid_attr_ro(level);
165raid_attr_ro_fn(resync);
166raid_attr_ro_state(state);
167
168void raid_component_add(struct raid_template *r,struct device *raid_dev,
169 struct device *component_dev)
170{
171 struct class_device *cdev =
172 attribute_container_find_class_device(&r->raid_attrs.ac,
173 raid_dev);
174 struct raid_component *rc;
175 struct raid_data *rd = class_get_devdata(cdev);
176 char buf[40];
177
178 rc = kmalloc(sizeof(*rc), GFP_KERNEL);
179 if (!rc)
180 return;
181
182 INIT_LIST_HEAD(&rc->node);
183 rc->dev = component_dev;
184 rc->num = rd->component_count++;
185
186 snprintf(buf, sizeof(buf), "component-%d", rc->num);
187 list_add_tail(&rc->node, &rd->component_list);
188 sysfs_create_link(&cdev->kobj, &component_dev->kobj, buf);
189}
190EXPORT_SYMBOL(raid_component_add);
191
192struct raid_template *
193raid_class_attach(struct raid_function_template *ft)
194{
195 struct raid_internal *i = kmalloc(sizeof(struct raid_internal),
196 GFP_KERNEL);
197 int count = 0;
198
199 if (unlikely(!i))
200 return NULL;
201
202 memset(i, 0, sizeof(*i));
203
204 i->f = ft;
205
206 i->r.raid_attrs.ac.class = &raid_class.class;
207 i->r.raid_attrs.ac.match = raid_match;
208 i->r.raid_attrs.ac.attrs = &i->attrs[0];
209
210 attribute_container_register(&i->r.raid_attrs.ac);
211
212 i->attrs[count++] = &class_device_attr_level;
213 i->attrs[count++] = &class_device_attr_resync;
214 i->attrs[count++] = &class_device_attr_state;
215
216 i->attrs[count] = NULL;
217 BUG_ON(count > RAID_NUM_ATTRS);
218
219 return &i->r;
220}
221EXPORT_SYMBOL(raid_class_attach);
222
223void
224raid_class_release(struct raid_template *r)
225{
226 struct raid_internal *i = to_raid_internal(r);
227
228 attribute_container_unregister(&i->r.raid_attrs.ac);
229
230 kfree(i);
231}
232EXPORT_SYMBOL(raid_class_release);
233
234static __init int raid_init(void)
235{
236 return transport_class_register(&raid_class);
237}
238
239static __exit void raid_exit(void)
240{
241 transport_class_unregister(&raid_class);
242}
243
244MODULE_AUTHOR("James Bottomley");
245MODULE_DESCRIPTION("RAID device class");
246MODULE_LICENSE("GPL");
247
248module_init(raid_init);
249module_exit(raid_exit);
250
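
raid_class.c gives emulated-RAID drivers three sysfs attributes (level, resync, state) plus component-N links. A hedged sketch of how a SCSI low-level driver might hook into it; the callback and template names are illustrative, only the raid_function_template fields and the exported entry points come from the file above:

    static int my_is_raid(struct device *dev)
    {
    	return 1;	/* treat every matched scsi_device as a RAID volume */
    }

    static void my_get_resync(struct device *dev) { /* refresh rd->resync */ }
    static void my_get_state(struct device *dev)  { /* refresh rd->state  */ }

    static struct raid_function_template my_raid_fn = {
    	.cookie		= &my_scsi_host_template,	/* must match sdev->host->hostt */
    	.is_raid	= my_is_raid,
    	.get_resync	= my_get_resync,
    	.get_state	= my_get_state,
    };

    /* driver init:   my_raid_tmpl = raid_class_attach(&my_raid_fn);          */
    /* per volume:    raid_component_add(my_raid_tmpl, &vol_dev, &disk_dev);  */
    /* driver exit:   raid_class_release(my_raid_tmpl);                       */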
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
new file mode 100644
index 000000000000..f97e3afa97d9
--- /dev/null
+++ b/drivers/scsi/sata_mv.c
@@ -0,0 +1,843 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 *
6 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/pci.h>
26#include <linux/init.h>
27#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/sched.h>
31#include <linux/dma-mapping.h>
32#include "scsi.h"
33#include <scsi/scsi_host.h>
34#include <linux/libata.h>
35#include <asm/io.h>
36
37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12"
39
40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */
42 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
43 MV_IO_BAR = 2, /* offset 0x18: IO space */
44 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
45
46 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
47 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
48
49 MV_PCI_REG_BASE = 0,
50 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
51 MV_SATAHC0_REG_BASE = 0x20000,
52
53 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
54 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57
58 MV_Q_CT = 32,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61
62 MV_DMA_BOUNDARY = 0xffffffffU,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)),
64
65 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */
69 MV_PORT_MASK = 3,
70
71 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */
75
76 chip_504x = 0,
77 chip_508x = 1,
78 chip_604x = 2,
79 chip_608x = 3,
80
81 /* PCI interface registers */
82
83 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3),
86 GLOB_SFT_RST = (1 << 4),
87
88 PCI_IRQ_CAUSE_OFS = 0x1d58,
89 PCI_IRQ_MASK_OFS = 0x1d5c,
90 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
91
92 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
93 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
94 PORT0_ERR = (1 << 0), /* shift by port # */
95 PORT0_DONE = (1 << 1), /* shift by port # */
96 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
97 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
98 PCI_ERR = (1 << 18),
99 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
100 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
101 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
102 GPIO_INT = (1 << 22),
103 SELF_INT = (1 << 23),
104 TWSI_INT = (1 << 24),
105 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
106 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
107 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
108 HC_MAIN_RSVD),
109
110 /* SATAHC registers */
111 HC_CFG_OFS = 0,
112
113 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */
117
118 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104,
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128
129 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
131 SATA_ACTIVE_OFS = 0x350,
132
133 /* Port registers */
134 EDMA_CFG_OFS = 0,
135
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc,
138 EDMA_ERR_D_PAR = (1 << 0),
139 EDMA_ERR_PRD_PAR = (1 << 1),
140 EDMA_ERR_DEV = (1 << 2),
141 EDMA_ERR_DEV_DCON = (1 << 3),
142 EDMA_ERR_DEV_CON = (1 << 4),
143 EDMA_ERR_SERR = (1 << 5),
144 EDMA_ERR_SELF_DIS = (1 << 7),
145 EDMA_ERR_BIST_ASYNC = (1 << 8),
146 EDMA_ERR_CRBQ_PAR = (1 << 9),
147 EDMA_ERR_CRPB_PAR = (1 << 10),
148 EDMA_ERR_INTRL_PAR = (1 << 11),
149 EDMA_ERR_IORDY = (1 << 12),
150 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
151 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
152 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
153 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
154 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
155 EDMA_ERR_TRANS_PROTO = (1 << 31),
156 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
157 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
158 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
159 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
160 EDMA_ERR_LNK_DATA_RX |
161 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO),
163
164 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2),
168
169 /* BDMA is 6xxx part only */
170 BDMA_CMD_OFS = 0x224,
171 BDMA_START = (1 << 0),
172
173 MV_UNDEF = 0,
174};
175
176struct mv_port_priv {
177
178};
179
180struct mv_host_priv {
181
182};
183
184static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
187static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192
193static Scsi_Host_Template mv_sht = {
194 .module = THIS_MODULE,
195 .name = DRV_NAME,
196 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE,
200 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF,
202 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF,
206 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config,
209 .bios_param = ata_std_bios_param,
210 .ordered_flush = 1,
211};
212
213static struct ata_port_operations mv_ops = {
214 .port_disable = ata_port_disable,
215
216 .tf_load = ata_tf_load,
217 .tf_read = ata_tf_read,
218 .check_status = ata_check_status,
219 .exec_command = ata_exec_command,
220 .dev_select = ata_std_dev_select,
221
222 .phy_reset = mv_phy_reset,
223
224 .qc_prep = ata_qc_prep,
225 .qc_issue = ata_qc_issue_prot,
226
227 .eng_timeout = ata_eng_timeout,
228
229 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear,
231
232 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write,
234
235 .port_start = ata_port_start,
236 .port_stop = ata_port_stop,
237 .host_stop = ata_host_stop,
238};
239
240static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */
242 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
245 .pio_mask = 0x1f, /* pio4-0 */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops,
248 },
249 { /* chip_508x */
250 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
253 MV_FLAG_DUAL_HC),
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops,
257 },
258 { /* chip_604x */
259 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA),
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops,
266 },
267 { /* chip_608x */
268 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC |
272 MV_FLAG_BDMA),
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops,
276 },
277};
278
279static struct pci_device_id mv_pci_tbl[] = {
280 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
281 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
282 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
283 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
284
285 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
286 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
287 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
288 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
289 {} /* terminate list */
290};
291
292static struct pci_driver mv_pci_driver = {
293 .name = DRV_NAME,
294 .id_table = mv_pci_tbl,
295 .probe = mv_init_one,
296 .remove = ata_pci_remove_one,
297};
298
299/*
300 * Functions
301 */
302
303static inline void writelfl(unsigned long data, void __iomem *addr)
304{
305 writel(data, addr);
306 (void) readl(addr); /* flush to avoid PCI posted write */
307}
308
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
318}
319
320static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
321{
322 return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
323 MV_SATAHC_ARBTR_REG_SZ +
324 ((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
325}
326
327static inline void __iomem *mv_ap_base(struct ata_port *ap)
328{
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330}
331
332static inline int mv_get_hc_count(unsigned long flags)
333{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335}
336
337static inline int mv_is_edma_active(struct ata_port *ap)
338{
339 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
341}
342
343static inline int mv_port_bdma_capable(struct ata_port *ap)
344{
345 return (ap->flags & MV_FLAG_BDMA);
346}
347
348static void mv_irq_clear(struct ata_port *ap)
349{
350}
351
352static unsigned int mv_scr_offset(unsigned int sc_reg_in)
353{
354 unsigned int ofs;
355
356 switch (sc_reg_in) {
357 case SCR_STATUS:
358 case SCR_CONTROL:
359 case SCR_ERROR:
360 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
361 break;
362 case SCR_ACTIVE:
363 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
364 break;
365 default:
366 ofs = 0xffffffffU;
367 break;
368 }
369 return ofs;
370}
371
372static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
373{
374 unsigned int ofs = mv_scr_offset(sc_reg_in);
375
376 if (0xffffffffU != ofs) {
377 return readl(mv_ap_base(ap) + ofs);
378 } else {
379 return (u32) ofs;
380 }
381}
382
383static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
384{
385 unsigned int ofs = mv_scr_offset(sc_reg_in);
386
387 if (0xffffffffU != ofs) {
388 writelfl(val, mv_ap_base(ap) + ofs);
389 }
390}
391
392static int mv_master_reset(void __iomem *mmio_base)
393{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0;
396 u32 t;
397
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status
401 * register" table.
402 */
403 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg);
405
406 for (i = 0; i < 100; i++) {
407 msleep(10);
408 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) {
410 break;
411 }
412 }
413 if (!(PCI_MASTER_EMPTY & t)) {
414 printk(KERN_ERR DRV_NAME "PCI master won't flush\n");
415 rc = 1; /* broken HW? */
416 goto done;
417 }
418
419 /* set reset */
420 i = 5;
421 do {
422 writel(t | GLOB_SFT_RST, reg);
423 t = readl(reg);
424 udelay(1);
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426
427 if (!(GLOB_SFT_RST & t)) {
428 printk(KERN_ERR DRV_NAME "can't set global reset\n");
429 rc = 1; /* broken HW? */
430 goto done;
431 }
432
433 /* clear reset */
434 i = 5;
435 do {
436 writel(t & ~GLOB_SFT_RST, reg);
437 t = readl(reg);
438 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440
441 if (GLOB_SFT_RST & t) {
442 printk(KERN_ERR DRV_NAME "can't clear global reset\n");
443 rc = 1; /* broken HW? */
444 }
445
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc;
449}
450
451static void mv_err_intr(struct ata_port *ap)
452{
453 void __iomem *port_mmio;
454 u32 edma_err_cause, serr = 0;
455
456 /* bug here b/c we got an err int on a port we don't know about,
457 * so there's no way to clear it
458 */
459 BUG_ON(NULL == ap);
460 port_mmio = mv_ap_base(ap);
461
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463
464 if (EDMA_ERR_SERR & edma_err_cause) {
465 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr);
467 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n",
469 ap->port_no, edma_err_cause, serr);
470
471 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
473
474 /* check for fatal here and recover if needed */
475 if (EDMA_ERR_FATAL & edma_err_cause) {
476 mv_phy_reset(ap);
477 }
478}
479
480/* Handle any outstanding interrupts in a single SATAHC
481 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc)
484{
485 void __iomem *mmio = host_set->mmio_base;
486 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
487 struct ata_port *ap;
488 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause;
490 int shift, port, port0, hard_port;
491 u8 ata_status;
492
493 if (hc == 0) {
494 port0 = 0;
495 } else {
496 port0 = MV_PORTS_PER_HC;
497 }
498
499 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
503 }
504
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
506 hc,relevant,hc_irq_cause);
507
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU;
512
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap);
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */
516 /* This is needed to clear the ATA INTRQ.
517 * FIXME: don't read the status reg in EDMA mode!
518 */
519 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr);
521 }
522
523 shift = port * 2;
524 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 }
527 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */
530 ata_status = readb((void __iomem *)
531 ap->ioaddr.status_addr) | ATA_ERR;
532 }
533
534 if (ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status);
542 }
543 }
544 }
545 VPRINTK("EXIT\n");
546}
547
548static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs)
550{
551 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio;
554 u32 irq_stat;
555
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559
560 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault
562 */
563 if (!irq_stat || (0xffffffffU == irq_stat)) {
564 return IRQ_NONE;
565 }
566
567 spin_lock(&host_set->lock);
568
569 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) {
572 mv_host_intr(host_set, relevant, hc);
573 handled = 1;
574 }
575 }
576 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need
578 * to recover from them properly.
579 */
580 }
581
582 spin_unlock(&host_set->lock);
583
584 return IRQ_RETVAL(handled);
585}
586
587static void mv_phy_reset(struct ata_port *ap)
588{
589 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma;
593
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595
596 edma = readl(port_mmio + EDMA_CMD_OFS);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
607
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */
610
611 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however.
613 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);
615
616 VPRINTK("Done. Now calling __sata_phy_reset()\n");
617
618 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap);
620
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
622 VPRINTK("Port disabled pre-sig. Exiting.\n");
623 return;
624 }
625
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
628 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
629 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
630
631 dev->class = ata_dev_classify(&tf);
632 if (!ata_dev_present(dev)) {
633 VPRINTK("Port disabled post-sig: No device present.\n");
634 ata_port_disable(ap);
635 }
636 VPRINTK("EXIT\n");
637}
638
639static void mv_port_init(struct ata_ioports *port, unsigned long base)
640{
641 /* PIO related setup */
642 port->data_addr = base + SHD_PIO_DATA_OFS;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
644 port->nsect_addr = base + SHD_SECT_CNT_OFS;
645 port->lbal_addr = base + SHD_LBA_L_OFS;
646 port->lbam_addr = base + SHD_LBA_M_OFS;
647 port->lbah_addr = base + SHD_LBA_H_OFS;
648 port->device_addr = base + SHD_DEV_HD_OFS;
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;
651 /* unused */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653
654 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);
656
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
661}
662
663static int mv_host_init(struct ata_probe_ent *probe_ent)
664{
665 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio;
668
669 if (mv_master_reset(probe_ent->mmio_base)) {
670 rc = 1;
671 goto done;
672 }
673
674 n_hc = mv_get_hc_count(probe_ent->host_flags);
675 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
676
677 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
680 }
681
682 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
686 }
687
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
690
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n",
693 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS));
697
698 done:
699 return rc;
700}
701
702/* move to PCI layer, integrate w/ MSI stuff */
703static void pci_intx(struct pci_dev *pdev, int enable)
704{
705 u16 pci_command, new;
706
707 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
708
709 if (enable)
710 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
711 else
712 new = pci_command | PCI_COMMAND_INTX_DISABLE;
713
714 if (new != pci_command)
715 pci_write_config_word(pdev, PCI_COMMAND, pci_command);
716}
717
718static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
719{
720 static int printed_version = 0;
721 struct ata_probe_ent *probe_ent = NULL;
722 struct mv_host_priv *hpriv;
723 unsigned int board_idx = (unsigned int)ent->driver_data;
724 void __iomem *mmio_base;
725 int pci_dev_busy = 0;
726 int rc;
727
728 if (!printed_version++) {
729 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
730 }
731
732 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
733 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
734
735 rc = pci_enable_device(pdev);
736 if (rc) {
737 return rc;
738 }
739
740 rc = pci_request_regions(pdev, DRV_NAME);
741 if (rc) {
742 pci_dev_busy = 1;
743 goto err_out;
744 }
745
746 pci_intx(pdev, 1);
747
748 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
749 if (probe_ent == NULL) {
750 rc = -ENOMEM;
751 goto err_out_regions;
752 }
753
754 memset(probe_ent, 0, sizeof(*probe_ent));
755 probe_ent->dev = pci_dev_to_dev(pdev);
756 INIT_LIST_HEAD(&probe_ent->node);
757
758 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
759 pci_resource_len(pdev, MV_PRIMARY_BAR));
760 if (mmio_base == NULL) {
761 rc = -ENOMEM;
762 goto err_out_free_ent;
763 }
764
765 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
766 if (!hpriv) {
767 rc = -ENOMEM;
768 goto err_out_iounmap;
769 }
770 memset(hpriv, 0, sizeof(*hpriv));
771
772 probe_ent->sht = mv_port_info[board_idx].sht;
773 probe_ent->host_flags = mv_port_info[board_idx].host_flags;
774 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
775 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
776 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
777
778 probe_ent->irq = pdev->irq;
779 probe_ent->irq_flags = SA_SHIRQ;
780 probe_ent->mmio_base = mmio_base;
781 probe_ent->private_data = hpriv;
782
783 /* initialize adapter */
784 rc = mv_host_init(probe_ent);
785 if (rc) {
786 goto err_out_hpriv;
787 }
788/* mv_print_info(probe_ent); */
789
790 {
791 int b, w;
792 u32 dw[4]; /* hold a line of 16b */
793 VPRINTK("PCI config space:\n");
794 for (b = 0; b < 0x40; ) {
795 for (w = 0; w < 4; w++) {
796 (void) pci_read_config_dword(pdev,b,&dw[w]);
797 b += sizeof(*dw);
798 }
799 VPRINTK("%08x %08x %08x %08x\n",
800 dw[0],dw[1],dw[2],dw[3]);
801 }
802 }
803
804 /* FIXME: check ata_device_add return value */
805 ata_device_add(probe_ent);
806 kfree(probe_ent);
807
808 return 0;
809
810 err_out_hpriv:
811 kfree(hpriv);
812 err_out_iounmap:
813 iounmap(mmio_base);
814 err_out_free_ent:
815 kfree(probe_ent);
816 err_out_regions:
817 pci_release_regions(pdev);
818 err_out:
819 if (!pci_dev_busy) {
820 pci_disable_device(pdev);
821 }
822
823 return rc;
824}
825
826static int __init mv_init(void)
827{
828 return pci_module_init(&mv_pci_driver);
829}
830
831static void __exit mv_exit(void)
832{
833 pci_unregister_driver(&mv_pci_driver);
834}
835
836MODULE_AUTHOR("Brett Russ");
837MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
838MODULE_LICENSE("GPL");
839MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
840MODULE_VERSION(DRV_VERSION);
841
842module_init(mv_init);
843module_exit(mv_exit);
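
The local pci_intx() helper in sata_mv.c does a read-modify-write of PCI_COMMAND to toggle the INTX_DISABLE bit. As written above it ends by writing back the unmodified pci_command value; the intent is presumably the updated word, roughly:

    static void pci_intx_toggle(struct pci_dev *pdev, int enable)	/* illustrative name */
    {
    	u16 pci_command, new;

    	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
    	if (enable)
    		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
    	else
    		new = pci_command | PCI_COMMAND_INTX_DISABLE;
    	if (new != pci_command)
    		pci_write_config_word(pdev, PCI_COMMAND, new);	/* write the updated value */
    }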
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 03d9bc6e69df..a1d62dee3be6 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -351,6 +351,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 static void nv_host_stop (struct ata_host_set *host_set)
 {
 	struct nv_host *host = host_set->private_data;
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
 
 	// Disable hotplug event interrupts.
 	if (host->host_desc->disable_hotplug)
@@ -358,7 +359,8 @@ static void nv_host_stop (struct ata_host_set *host_set)
 
 	kfree(host);
 
-	ata_host_stop(host_set);
+	if (host_set->mmio_base)
+		pci_iounmap(pdev, host_set->mmio_base);
 }
 
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -420,8 +422,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
 		unsigned long base;
 
-		probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
-					       pci_resource_len(pdev, 5));
+		probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
 		if (probe_ent->mmio_base == NULL) {
 			rc = -EIO;
 			goto err_out_free_host;
@@ -457,7 +458,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 err_out_iounmap:
 	if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-		iounmap(probe_ent->mmio_base);
+		pci_iounmap(pdev, probe_ent->mmio_base);
 err_out_free_host:
 	kfree(host);
 err_out_free_ent:
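
sata_nv now maps BAR 5 with pci_iomap() and releases it with pci_iounmap(), which ties the mapping to its pci_dev and works for both MMIO and I/O-port BARs. The basic pattern:

    void __iomem *mmio = pci_iomap(pdev, 5, 0);	/* 0 = map the whole BAR */
    if (!mmio)
    	return -ENOMEM;
    /* ... readl()/writel() against mmio ... */
    pci_iounmap(pdev, mmio);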
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 7c4f6ecc1cc9..538ad727bd2e 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -92,6 +92,7 @@ static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
 static void pdc_irq_clear(struct ata_port *ap);
 static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
 
+
 static Scsi_Host_Template pdc_ata_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -132,7 +133,7 @@ static struct ata_port_operations pdc_sata_ops = {
 	.scr_write		= pdc_sata_scr_write,
 	.port_start		= pdc_port_start,
 	.port_stop		= pdc_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static struct ata_port_operations pdc_pata_ops = {
@@ -153,7 +154,7 @@ static struct ata_port_operations pdc_pata_ops = {
 
 	.port_start		= pdc_port_start,
 	.port_stop		= pdc_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static struct ata_port_info pdc_port_info[] = {
@@ -282,7 +283,7 @@ static void pdc_port_stop(struct ata_port *ap)
 
 static void pdc_reset_port(struct ata_port *ap)
 {
-	void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
 	unsigned int i;
 	u32 tmp;
 
@@ -418,7 +419,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
 	u8 status;
 	unsigned int handled = 0, have_err = 0;
 	u32 tmp;
-	void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
 
 	tmp = readl(mmio);
 	if (tmp & PDC_ERR_MASK) {
@@ -447,7 +448,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
 static void pdc_irq_clear(struct ata_port *ap)
 {
 	struct ata_host_set *host_set = ap->host_set;
-	void *mmio = host_set->mmio_base;
+	void __iomem *mmio = host_set->mmio_base;
 
 	readl(mmio + PDC_INT_SEQMASK);
 }
@@ -459,7 +460,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 	u32 mask = 0;
 	unsigned int i, tmp;
 	unsigned int handled = 0;
-	void *mmio_base;
+	void __iomem *mmio_base;
 
 	VPRINTK("ENTER\n");
 
@@ -581,7 +582,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
 
 static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
 {
-	void *mmio = pe->mmio_base;
+	void __iomem *mmio = pe->mmio_base;
 	u32 tmp;
 
 	/*
@@ -624,7 +625,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	static int printed_version;
 	struct ata_probe_ent *probe_ent = NULL;
 	unsigned long base;
-	void *mmio_base;
+	void __iomem *mmio_base;
 	unsigned int board_idx = (unsigned int) ent->driver_data;
 	int pci_dev_busy = 0;
 	int rc;
@@ -663,8 +664,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap(pci_resource_start(pdev, 3),
-			    pci_resource_len(pdev, 3));
+	mmio_base = pci_iomap(pdev, 3, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
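
The sata_promise hunks are mostly sparse annotations: MMIO cookies are declared void __iomem * so the readl()/writel() accessors can be type-checked, and the host_stop hook moves to ata_pci_host_stop, which unmaps the region itself. A minimal illustration of the annotated style, reusing the register offsets referenced above:

    void __iomem *mmio = host_set->mmio_base;	/* an __iomem cookie, not a plain pointer */
    u32 ctl = readl(mmio + PDC_GLOBAL_CTL);	/* accessors take __iomem pointers */
    (void)readl(mmio + PDC_INT_SEQMASK);	/* ack-style read, as in pdc_irq_clear() */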
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 9c99ab433bd3..ffcdeb68641c 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -494,7 +494,7 @@ static int qs_port_start(struct ata_port *ap)
494 if (rc) 494 if (rc)
495 return rc; 495 return rc;
496 qs_enter_reg_mode(ap); 496 qs_enter_reg_mode(ap);
497 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); 497 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
498 if (!pp) { 498 if (!pp) {
499 rc = -ENOMEM; 499 rc = -ENOMEM;
500 goto err_out; 500 goto err_out;
@@ -538,11 +538,12 @@ static void qs_port_stop(struct ata_port *ap)
538static void qs_host_stop(struct ata_host_set *host_set) 538static void qs_host_stop(struct ata_host_set *host_set)
539{ 539{
540 void __iomem *mmio_base = host_set->mmio_base; 540 void __iomem *mmio_base = host_set->mmio_base;
541 struct pci_dev *pdev = to_pci_dev(host_set->dev);
541 542
542 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 543 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
543 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ 544 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
544 545
545 ata_host_stop(host_set); 546 pci_iounmap(pdev, mmio_base);
546} 547}
547 548
548static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 549static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
@@ -646,8 +647,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
646 goto err_out_regions; 647 goto err_out_regions;
647 } 648 }
648 649
649 mmio_base = ioremap(pci_resource_start(pdev, 4), 650 mmio_base = pci_iomap(pdev, 4, 0);
650 pci_resource_len(pdev, 4));
651 if (mmio_base == NULL) { 651 if (mmio_base == NULL) {
652 rc = -ENOMEM; 652 rc = -ENOMEM;
653 goto err_out_regions; 653 goto err_out_regions;
@@ -697,7 +697,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
697 return 0; 697 return 0;
698 698
699err_out_iounmap: 699err_out_iounmap:
700 iounmap(mmio_base); 700 pci_iounmap(pdev, mmio_base);
701err_out_regions: 701err_out_regions:
702 pci_release_regions(pdev); 702 pci_release_regions(pdev);
703err_out: 703err_out:
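
qs_host_stop() above shows the teardown half of the same conversion: rather than relying on ata_host_stop() to undo the mapping, the driver fetches the pci_dev back from host_set->dev with to_pci_dev() and calls pci_iounmap() itself. A hedged sketch of that shape, reusing the libata structures visible in the hunk (the register offset is a placeholder, not a real QStor register):

    #include <linux/pci.h>
    #include <linux/libata.h>
    #include <asm/io.h>

    #define EXAMPLE_CTRL_OFFSET 0x00    /* placeholder register offset */

    /* assumes a mapping obtained earlier with pci_iomap() */
    static void example_host_stop(struct ata_host_set *host_set)
    {
        void __iomem *mmio_base = host_set->mmio_base;
        struct pci_dev *pdev = to_pci_dev(host_set->dev);

        /* quiesce the controller first ... */
        writeb(0, mmio_base + EXAMPLE_CTRL_OFFSET);

        /* ... then drop the BAR mapping */
        pci_iounmap(pdev, mmio_base);
    }
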
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 71d49548f0a3..ba98a175ee3a 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -86,6 +86,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 86static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static void sil_post_set_mode (struct ata_port *ap); 87static void sil_post_set_mode (struct ata_port *ap);
88 88
89
89static struct pci_device_id sil_pci_tbl[] = { 90static struct pci_device_id sil_pci_tbl[] = {
90 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 91 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
91 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 92 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
@@ -172,7 +173,7 @@ static struct ata_port_operations sil_ops = {
172 .scr_write = sil_scr_write, 173 .scr_write = sil_scr_write,
173 .port_start = ata_port_start, 174 .port_start = ata_port_start,
174 .port_stop = ata_port_stop, 175 .port_stop = ata_port_stop,
175 .host_stop = ata_host_stop, 176 .host_stop = ata_pci_host_stop,
176}; 177};
177 178
178static struct ata_port_info sil_port_info[] = { 179static struct ata_port_info sil_port_info[] = {
@@ -231,6 +232,7 @@ MODULE_LICENSE("GPL");
231MODULE_DEVICE_TABLE(pci, sil_pci_tbl); 232MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
232MODULE_VERSION(DRV_VERSION); 233MODULE_VERSION(DRV_VERSION);
233 234
235
234static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) 236static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
235{ 237{
236 u8 cache_line = 0; 238 u8 cache_line = 0;
@@ -242,7 +244,8 @@ static void sil_post_set_mode (struct ata_port *ap)
242{ 244{
243 struct ata_host_set *host_set = ap->host_set; 245 struct ata_host_set *host_set = ap->host_set;
244 struct ata_device *dev; 246 struct ata_device *dev;
245 void *addr = host_set->mmio_base + sil_port[ap->port_no].xfer_mode; 247 void __iomem *addr =
248 host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
246 u32 tmp, dev_mode[2]; 249 u32 tmp, dev_mode[2];
247 unsigned int i; 250 unsigned int i;
248 251
@@ -375,7 +378,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
375 static int printed_version; 378 static int printed_version;
376 struct ata_probe_ent *probe_ent = NULL; 379 struct ata_probe_ent *probe_ent = NULL;
377 unsigned long base; 380 unsigned long base;
378 void *mmio_base; 381 void __iomem *mmio_base;
379 int rc; 382 int rc;
380 unsigned int i; 383 unsigned int i;
381 int pci_dev_busy = 0; 384 int pci_dev_busy = 0;
@@ -425,8 +428,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
425 probe_ent->irq_flags = SA_SHIRQ; 428 probe_ent->irq_flags = SA_SHIRQ;
426 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; 429 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
427 430
428 mmio_base = ioremap(pci_resource_start(pdev, 5), 431 mmio_base = pci_iomap(pdev, 5, 0);
429 pci_resource_len(pdev, 5));
430 if (mmio_base == NULL) { 432 if (mmio_base == NULL) {
431 rc = -ENOMEM; 433 rc = -ENOMEM;
432 goto err_out_free_ent; 434 goto err_out_free_ent;
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 19d3bb3b0fb6..d89d968bedac 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -318,7 +318,7 @@ static struct ata_port_operations k2_sata_ops = {
318 .scr_write = k2_sata_scr_write, 318 .scr_write = k2_sata_scr_write,
319 .port_start = ata_port_start, 319 .port_start = ata_port_start,
320 .port_stop = ata_port_stop, 320 .port_stop = ata_port_stop,
321 .host_stop = ata_host_stop, 321 .host_stop = ata_pci_host_stop,
322}; 322};
323 323
324static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) 324static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -346,7 +346,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
346 static int printed_version; 346 static int printed_version;
347 struct ata_probe_ent *probe_ent = NULL; 347 struct ata_probe_ent *probe_ent = NULL;
348 unsigned long base; 348 unsigned long base;
349 void *mmio_base; 349 void __iomem *mmio_base;
350 int pci_dev_busy = 0; 350 int pci_dev_busy = 0;
351 int rc; 351 int rc;
352 int i; 352 int i;
@@ -392,8 +392,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
392 probe_ent->dev = pci_dev_to_dev(pdev); 392 probe_ent->dev = pci_dev_to_dev(pdev);
393 INIT_LIST_HEAD(&probe_ent->node); 393 INIT_LIST_HEAD(&probe_ent->node);
394 394
395 mmio_base = ioremap(pci_resource_start(pdev, 5), 395 mmio_base = pci_iomap(pdev, 5, 0);
396 pci_resource_len(pdev, 5));
397 if (mmio_base == NULL) { 396 if (mmio_base == NULL) {
398 rc = -ENOMEM; 397 rc = -ENOMEM;
399 goto err_out_free_ent; 398 goto err_out_free_ent;
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index c72fcc46f0fa..540a85191172 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -245,13 +245,14 @@ static struct pci_driver pdc_sata_pci_driver = {
245 245
246static void pdc20621_host_stop(struct ata_host_set *host_set) 246static void pdc20621_host_stop(struct ata_host_set *host_set)
247{ 247{
248 struct pci_dev *pdev = to_pci_dev(host_set->dev);
248 struct pdc_host_priv *hpriv = host_set->private_data; 249 struct pdc_host_priv *hpriv = host_set->private_data;
249 void *dimm_mmio = hpriv->dimm_mmio; 250 void *dimm_mmio = hpriv->dimm_mmio;
250 251
251 iounmap(dimm_mmio); 252 pci_iounmap(pdev, dimm_mmio);
252 kfree(hpriv); 253 kfree(hpriv);
253 254
254 ata_host_stop(host_set); 255 pci_iounmap(pdev, host_set->mmio_base);
255} 256}
256 257
257static int pdc_port_start(struct ata_port *ap) 258static int pdc_port_start(struct ata_port *ap)
@@ -451,9 +452,9 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
451 struct scatterlist *sg = qc->sg; 452 struct scatterlist *sg = qc->sg;
452 struct ata_port *ap = qc->ap; 453 struct ata_port *ap = qc->ap;
453 struct pdc_port_priv *pp = ap->private_data; 454 struct pdc_port_priv *pp = ap->private_data;
454 void *mmio = ap->host_set->mmio_base; 455 void __iomem *mmio = ap->host_set->mmio_base;
455 struct pdc_host_priv *hpriv = ap->host_set->private_data; 456 struct pdc_host_priv *hpriv = ap->host_set->private_data;
456 void *dimm_mmio = hpriv->dimm_mmio; 457 void __iomem *dimm_mmio = hpriv->dimm_mmio;
457 unsigned int portno = ap->port_no; 458 unsigned int portno = ap->port_no;
458 unsigned int i, last, idx, total_len = 0, sgt_len; 459 unsigned int i, last, idx, total_len = 0, sgt_len;
459 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 460 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
@@ -513,9 +514,9 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
513{ 514{
514 struct ata_port *ap = qc->ap; 515 struct ata_port *ap = qc->ap;
515 struct pdc_port_priv *pp = ap->private_data; 516 struct pdc_port_priv *pp = ap->private_data;
516 void *mmio = ap->host_set->mmio_base; 517 void __iomem *mmio = ap->host_set->mmio_base;
517 struct pdc_host_priv *hpriv = ap->host_set->private_data; 518 struct pdc_host_priv *hpriv = ap->host_set->private_data;
518 void *dimm_mmio = hpriv->dimm_mmio; 519 void __iomem *dimm_mmio = hpriv->dimm_mmio;
519 unsigned int portno = ap->port_no; 520 unsigned int portno = ap->port_no;
520 unsigned int i; 521 unsigned int i;
521 522
@@ -565,7 +566,7 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
565{ 566{
566 struct ata_port *ap = qc->ap; 567 struct ata_port *ap = qc->ap;
567 struct ata_host_set *host_set = ap->host_set; 568 struct ata_host_set *host_set = ap->host_set;
568 void *mmio = host_set->mmio_base; 569 void __iomem *mmio = host_set->mmio_base;
569 570
570 /* hard-code chip #0 */ 571 /* hard-code chip #0 */
571 mmio += PDC_CHIP0_OFS; 572 mmio += PDC_CHIP0_OFS;
@@ -639,7 +640,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639 struct ata_port *ap = qc->ap; 640 struct ata_port *ap = qc->ap;
640 struct ata_host_set *host_set = ap->host_set; 641 struct ata_host_set *host_set = ap->host_set;
641 unsigned int port_no = ap->port_no; 642 unsigned int port_no = ap->port_no;
642 void *mmio = host_set->mmio_base; 643 void __iomem *mmio = host_set->mmio_base;
643 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
644 u8 seq = (u8) (port_no + 1); 645 u8 seq = (u8) (port_no + 1);
645 unsigned int port_ofs; 646 unsigned int port_ofs;
@@ -699,7 +700,7 @@ static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
699static inline unsigned int pdc20621_host_intr( struct ata_port *ap, 700static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
700 struct ata_queued_cmd *qc, 701 struct ata_queued_cmd *qc,
701 unsigned int doing_hdma, 702 unsigned int doing_hdma,
702 void *mmio) 703 void __iomem *mmio)
703{ 704{
704 unsigned int port_no = ap->port_no; 705 unsigned int port_no = ap->port_no;
705 unsigned int port_ofs = 706 unsigned int port_ofs =
@@ -778,7 +779,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
778static void pdc20621_irq_clear(struct ata_port *ap) 779static void pdc20621_irq_clear(struct ata_port *ap)
779{ 780{
780 struct ata_host_set *host_set = ap->host_set; 781 struct ata_host_set *host_set = ap->host_set;
781 void *mmio = host_set->mmio_base; 782 void __iomem *mmio = host_set->mmio_base;
782 783
783 mmio += PDC_CHIP0_OFS; 784 mmio += PDC_CHIP0_OFS;
784 785
@@ -792,7 +793,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
792 u32 mask = 0; 793 u32 mask = 0;
793 unsigned int i, tmp, port_no; 794 unsigned int i, tmp, port_no;
794 unsigned int handled = 0; 795 unsigned int handled = 0;
795 void *mmio_base; 796 void __iomem *mmio_base;
796 797
797 VPRINTK("ENTER\n"); 798 VPRINTK("ENTER\n");
798 799
@@ -940,9 +941,9 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
940 u16 idx; 941 u16 idx;
941 u8 page_mask; 942 u8 page_mask;
942 long dist; 943 long dist;
943 void *mmio = pe->mmio_base; 944 void __iomem *mmio = pe->mmio_base;
944 struct pdc_host_priv *hpriv = pe->private_data; 945 struct pdc_host_priv *hpriv = pe->private_data;
945 void *dimm_mmio = hpriv->dimm_mmio; 946 void __iomem *dimm_mmio = hpriv->dimm_mmio;
946 947
947 /* hard-code chip #0 */ 948 /* hard-code chip #0 */
948 mmio += PDC_CHIP0_OFS; 949 mmio += PDC_CHIP0_OFS;
@@ -996,9 +997,9 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
996 u16 idx; 997 u16 idx;
997 u8 page_mask; 998 u8 page_mask;
998 long dist; 999 long dist;
999 void *mmio = pe->mmio_base; 1000 void __iomem *mmio = pe->mmio_base;
1000 struct pdc_host_priv *hpriv = pe->private_data; 1001 struct pdc_host_priv *hpriv = pe->private_data;
1001 void *dimm_mmio = hpriv->dimm_mmio; 1002 void __iomem *dimm_mmio = hpriv->dimm_mmio;
1002 1003
1003 /* hard-code chip #0 */ 1004 /* hard-code chip #0 */
1004 mmio += PDC_CHIP0_OFS; 1005 mmio += PDC_CHIP0_OFS;
@@ -1044,7 +1045,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1044static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, 1045static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1045 u32 subaddr, u32 *pdata) 1046 u32 subaddr, u32 *pdata)
1046{ 1047{
1047 void *mmio = pe->mmio_base; 1048 void __iomem *mmio = pe->mmio_base;
1048 u32 i2creg = 0; 1049 u32 i2creg = 0;
1049 u32 status; 1050 u32 status;
1050 u32 count =0; 1051 u32 count =0;
@@ -1103,7 +1104,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1103 u32 data = 0; 1104 u32 data = 0;
1104 int size, i; 1105 int size, i;
1105 u8 bdimmsize; 1106 u8 bdimmsize;
1106 void *mmio = pe->mmio_base; 1107 void __iomem *mmio = pe->mmio_base;
1107 static const struct { 1108 static const struct {
1108 unsigned int reg; 1109 unsigned int reg;
1109 unsigned int ofs; 1110 unsigned int ofs;
@@ -1166,7 +1167,7 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1166{ 1167{
1167 u32 data, spd0; 1168 u32 data, spd0;
1168 int error, i; 1169 int error, i;
1169 void *mmio = pe->mmio_base; 1170 void __iomem *mmio = pe->mmio_base;
1170 1171
1171 /* hard-code chip #0 */ 1172 /* hard-code chip #0 */
1172 mmio += PDC_CHIP0_OFS; 1173 mmio += PDC_CHIP0_OFS;
@@ -1220,7 +1221,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1220 u32 ticks=0; 1221 u32 ticks=0;
1221 u32 clock=0; 1222 u32 clock=0;
1222 u32 fparam=0; 1223 u32 fparam=0;
1223 void *mmio = pe->mmio_base; 1224 void __iomem *mmio = pe->mmio_base;
1224 1225
1225 /* hard-code chip #0 */ 1226 /* hard-code chip #0 */
1226 mmio += PDC_CHIP0_OFS; 1227 mmio += PDC_CHIP0_OFS;
@@ -1344,7 +1345,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1344static void pdc_20621_init(struct ata_probe_ent *pe) 1345static void pdc_20621_init(struct ata_probe_ent *pe)
1345{ 1346{
1346 u32 tmp; 1347 u32 tmp;
1347 void *mmio = pe->mmio_base; 1348 void __iomem *mmio = pe->mmio_base;
1348 1349
1349 /* hard-code chip #0 */ 1350 /* hard-code chip #0 */
1350 mmio += PDC_CHIP0_OFS; 1351 mmio += PDC_CHIP0_OFS;
@@ -1377,7 +1378,8 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1377 static int printed_version; 1378 static int printed_version;
1378 struct ata_probe_ent *probe_ent = NULL; 1379 struct ata_probe_ent *probe_ent = NULL;
1379 unsigned long base; 1380 unsigned long base;
1380 void *mmio_base, *dimm_mmio = NULL; 1381 void __iomem *mmio_base;
1382 void __iomem *dimm_mmio = NULL;
1381 struct pdc_host_priv *hpriv = NULL; 1383 struct pdc_host_priv *hpriv = NULL;
1382 unsigned int board_idx = (unsigned int) ent->driver_data; 1384 unsigned int board_idx = (unsigned int) ent->driver_data;
1383 int pci_dev_busy = 0; 1385 int pci_dev_busy = 0;
@@ -1417,8 +1419,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1417 probe_ent->dev = pci_dev_to_dev(pdev); 1419 probe_ent->dev = pci_dev_to_dev(pdev);
1418 INIT_LIST_HEAD(&probe_ent->node); 1420 INIT_LIST_HEAD(&probe_ent->node);
1419 1421
1420 mmio_base = ioremap(pci_resource_start(pdev, 3), 1422 mmio_base = pci_iomap(pdev, 3, 0);
1421 pci_resource_len(pdev, 3));
1422 if (mmio_base == NULL) { 1423 if (mmio_base == NULL) {
1423 rc = -ENOMEM; 1424 rc = -ENOMEM;
1424 goto err_out_free_ent; 1425 goto err_out_free_ent;
@@ -1432,8 +1433,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1432 } 1433 }
1433 memset(hpriv, 0, sizeof(*hpriv)); 1434 memset(hpriv, 0, sizeof(*hpriv));
1434 1435
1435 dimm_mmio = ioremap(pci_resource_start(pdev, 4), 1436 dimm_mmio = pci_iomap(pdev, 4, 0);
1436 pci_resource_len(pdev, 4));
1437 if (!dimm_mmio) { 1437 if (!dimm_mmio) {
1438 kfree(hpriv); 1438 kfree(hpriv);
1439 rc = -ENOMEM; 1439 rc = -ENOMEM;
@@ -1480,9 +1480,9 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1480 1480
1481err_out_iounmap_dimm: /* only get to this label if 20621 */ 1481err_out_iounmap_dimm: /* only get to this label if 20621 */
1482 kfree(hpriv); 1482 kfree(hpriv);
1483 iounmap(dimm_mmio); 1483 pci_iounmap(pdev, dimm_mmio);
1484err_out_iounmap: 1484err_out_iounmap:
1485 iounmap(mmio_base); 1485 pci_iounmap(pdev, mmio_base);
1486err_out_free_ent: 1486err_out_free_ent:
1487 kfree(probe_ent); 1487 kfree(probe_ent);
1488err_out_regions: 1488err_out_regions:
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 1566886815fb..42e13ed8eb5b 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -125,8 +125,8 @@ static struct ata_port_info uli_port_info = {
125 .sht = &uli_sht, 125 .sht = &uli_sht,
126 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | 126 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
127 ATA_FLAG_NO_LEGACY, 127 ATA_FLAG_NO_LEGACY,
128 .pio_mask = 0x03, //support pio mode 4 (FIXME) 128 .pio_mask = 0x1f, /* pio0-4 */
129 .udma_mask = 0x7f, //support udma mode 6 129 .udma_mask = 0x7f, /* udma0-6 */
130 .port_ops = &uli_ops, 130 .port_ops = &uli_ops,
131}; 131};
132 132
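
The sata_uli change is a pure capability-mask fix: these masks are bitmaps in which bit N means "transfer mode N is supported", so the old 0x03 only advertised PIO0-1 even though its comment claimed mode 4, while 0x1f (bits 0-4) matches the new "pio0-4" comment; 0x7f likewise spells out UDMA0-6. Purely for illustration (the macro names below are invented for this note):

    /* bit N set  =>  mode N supported */
    #define EXAMPLE_PIO_0_1_MASK    0x03    /* 0b00011  : PIO0-1 only (old value)  */
    #define EXAMPLE_PIO_0_4_MASK    0x1f    /* 0b11111  : PIO0-4 (corrected value) */
    #define EXAMPLE_UDMA_0_6_MASK   0x7f    /* 0b1111111: UDMA0-6                  */
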
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 3985f344da4d..cf94e0158a8d 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -252,7 +252,7 @@ static struct ata_port_operations vsc_sata_ops = {
252 .scr_write = vsc_sata_scr_write, 252 .scr_write = vsc_sata_scr_write,
253 .port_start = ata_port_start, 253 .port_start = ata_port_start,
254 .port_stop = ata_port_stop, 254 .port_stop = ata_port_stop,
255 .host_stop = ata_host_stop, 255 .host_stop = ata_pci_host_stop,
256}; 256};
257 257
258static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) 258static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -326,8 +326,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
326 probe_ent->dev = pci_dev_to_dev(pdev); 326 probe_ent->dev = pci_dev_to_dev(pdev);
327 INIT_LIST_HEAD(&probe_ent->node); 327 INIT_LIST_HEAD(&probe_ent->node);
328 328
329 mmio_base = ioremap(pci_resource_start(pdev, 0), 329 mmio_base = pci_iomap(pdev, 0, 0);
330 pci_resource_len(pdev, 0));
331 if (mmio_base == NULL) { 330 if (mmio_base == NULL) {
332 rc = -ENOMEM; 331 rc = -ENOMEM;
333 goto err_out_free_ent; 332 goto err_out_free_ent;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d14523d7e449..a780546eda9c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -268,6 +268,7 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
268 } else 268 } else
269 put_device(&dev->sdev_gendev); 269 put_device(&dev->sdev_gendev);
270 270
271 cmd->jiffies_at_alloc = jiffies;
271 return cmd; 272 return cmd;
272} 273}
273EXPORT_SYMBOL(scsi_get_command); 274EXPORT_SYMBOL(scsi_get_command);
@@ -627,7 +628,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
627 spin_lock_irqsave(host->host_lock, flags); 628 spin_lock_irqsave(host->host_lock, flags);
628 scsi_cmd_get_serial(host, cmd); 629 scsi_cmd_get_serial(host, cmd);
629 630
630 if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) { 631 if (unlikely(host->shost_state == SHOST_DEL)) {
631 cmd->result = (DID_NO_CONNECT << 16); 632 cmd->result = (DID_NO_CONNECT << 16);
632 scsi_done(cmd); 633 scsi_done(cmd);
633 } else { 634 } else {
@@ -798,9 +799,23 @@ static void scsi_softirq(struct softirq_action *h)
798 while (!list_empty(&local_q)) { 799 while (!list_empty(&local_q)) {
799 struct scsi_cmnd *cmd = list_entry(local_q.next, 800 struct scsi_cmnd *cmd = list_entry(local_q.next,
800 struct scsi_cmnd, eh_entry); 801 struct scsi_cmnd, eh_entry);
802 /* The longest time any command should be outstanding is the
803 * per command timeout multiplied by the number of retries.
804 *
805 * For a typical command, this is 2.5 minutes */
806 unsigned long wait_for
807 = cmd->allowed * cmd->timeout_per_command;
801 list_del_init(&cmd->eh_entry); 808 list_del_init(&cmd->eh_entry);
802 809
803 disposition = scsi_decide_disposition(cmd); 810 disposition = scsi_decide_disposition(cmd);
811 if (disposition != SUCCESS &&
812 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
813 dev_printk(KERN_ERR, &cmd->device->sdev_gendev,
814 "timing out command, waited %lus\n",
815 wait_for/HZ);
816 disposition = SUCCESS;
817 }
818
804 scsi_log_completion(cmd, disposition); 819 scsi_log_completion(cmd, disposition);
805 switch (disposition) { 820 switch (disposition) {
806 case SUCCESS: 821 case SUCCESS:
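
The scsi.c hunks above add an absolute upper bound on a command's lifetime: scsi_get_command() stamps jiffies_at_alloc when the command is allocated, and the completion softirq forces a SUCCESS disposition once allowed * timeout_per_command jiffies have passed since then (for example, 5 retries of a 30-second timeout gives the 2.5 minutes mentioned in the comment). A stripped-down sketch of the same check, assuming only the two fields introduced above:

    #include <linux/jiffies.h>
    #include <scsi/scsi_cmnd.h>

    /* true once @cmd has been outstanding longer than its full retry budget */
    static int example_cmd_budget_expired(struct scsi_cmnd *cmd)
    {
        unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;

        /* time_before(a, b) is a jiffies-wraparound-safe "a < b" */
        return time_before(cmd->jiffies_at_alloc + wait_for, jiffies);
    }
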
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 6121dc1bfada..07b554affcf2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -114,6 +114,7 @@ static struct {
114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ 114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ 115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
116 {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */ 116 {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */
117 {"", "Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */
117 118
118 /* 119 /*
119 * Other types of devices that have special flags. 120 * Other types of devices that have special flags.
@@ -135,7 +136,7 @@ static struct {
135 {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, 136 {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
136 {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 137 {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
137 {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN}, 138 {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN},
138 {"DEC", "HSG80", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, 139 {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
139 {"DELL", "PV660F", NULL, BLIST_SPARSELUN}, 140 {"DELL", "PV660F", NULL, BLIST_SPARSELUN},
140 {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN}, 141 {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN},
141 {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ 142 {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
@@ -191,6 +192,7 @@ static struct {
191 {"SGI", "RAID5", "*", BLIST_SPARSELUN}, 192 {"SGI", "RAID5", "*", BLIST_SPARSELUN},
192 {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, 193 {"SGI", "TP9100", "*", BLIST_REPORTLUN2},
193 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 194 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
195 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
194 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 196 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
195 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 197 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
196 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 198 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0fc8b48f052b..895c9452be4c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/kthread.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/blkdev.h> 25#include <linux/blkdev.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
@@ -75,7 +76,7 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
75 76
76 scmd->eh_eflags |= eh_flag; 77 scmd->eh_eflags |= eh_flag;
77 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 78 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
78 set_bit(SHOST_RECOVERY, &shost->shost_state); 79 scsi_host_set_state(shost, SHOST_RECOVERY);
79 shost->host_failed++; 80 shost->host_failed++;
80 scsi_eh_wakeup(shost); 81 scsi_eh_wakeup(shost);
81 spin_unlock_irqrestore(shost->host_lock, flags); 82 spin_unlock_irqrestore(shost->host_lock, flags);
@@ -115,7 +116,6 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
115 116
116 add_timer(&scmd->eh_timeout); 117 add_timer(&scmd->eh_timeout);
117} 118}
118EXPORT_SYMBOL(scsi_add_timer);
119 119
120/** 120/**
121 * scsi_delete_timer - Delete/cancel timer for a given function. 121 * scsi_delete_timer - Delete/cancel timer for a given function.
@@ -143,7 +143,6 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
143 143
144 return rtn; 144 return rtn;
145} 145}
146EXPORT_SYMBOL(scsi_delete_timer);
147 146
148/** 147/**
149 * scsi_times_out - Timeout function for normal scsi commands. 148 * scsi_times_out - Timeout function for normal scsi commands.
@@ -197,7 +196,8 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
197{ 196{
198 int online; 197 int online;
199 198
200 wait_event(sdev->host->host_wait, (!test_bit(SHOST_RECOVERY, &sdev->host->shost_state))); 199 wait_event(sdev->host->host_wait, (sdev->host->shost_state !=
200 SHOST_RECOVERY));
201 201
202 online = scsi_device_online(sdev); 202 online = scsi_device_online(sdev);
203 203
@@ -775,9 +775,11 @@ retry_tur:
775 __FUNCTION__, scmd, rtn)); 775 __FUNCTION__, scmd, rtn));
776 if (rtn == SUCCESS) 776 if (rtn == SUCCESS)
777 return 0; 777 return 0;
778 else if (rtn == NEEDS_RETRY) 778 else if (rtn == NEEDS_RETRY) {
779 if (retry_cnt--) 779 if (retry_cnt--)
780 goto retry_tur; 780 goto retry_tur;
781 return 0;
782 }
781 return 1; 783 return 1;
782} 784}
783 785
@@ -1458,7 +1460,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1458 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1460 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1459 __FUNCTION__)); 1461 __FUNCTION__));
1460 1462
1461 clear_bit(SHOST_RECOVERY, &shost->shost_state); 1463 scsi_host_set_state(shost, SHOST_RUNNING);
1462 1464
1463 wake_up(&shost->host_wait); 1465 wake_up(&shost->host_wait);
1464 1466
@@ -1582,16 +1584,8 @@ int scsi_error_handler(void *data)
1582 int rtn; 1584 int rtn;
1583 DECLARE_MUTEX_LOCKED(sem); 1585 DECLARE_MUTEX_LOCKED(sem);
1584 1586
1585 /*
1586 * Flush resources
1587 */
1588
1589 daemonize("scsi_eh_%d", shost->host_no);
1590
1591 current->flags |= PF_NOFREEZE; 1587 current->flags |= PF_NOFREEZE;
1592
1593 shost->eh_wait = &sem; 1588 shost->eh_wait = &sem;
1594 shost->ehandler = current;
1595 1589
1596 /* 1590 /*
1597 * Wake up the thread that created us. 1591 * Wake up the thread that created us.
@@ -1599,8 +1593,6 @@ int scsi_error_handler(void *data)
1599 SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of" 1593 SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
1600 " scsi_eh_%d\n",shost->host_no)); 1594 " scsi_eh_%d\n",shost->host_no));
1601 1595
1602 complete(shost->eh_notify);
1603
1604 while (1) { 1596 while (1) {
1605 /* 1597 /*
1606 * If we get a signal, it means we are supposed to go 1598 * If we get a signal, it means we are supposed to go
@@ -1621,7 +1613,7 @@ int scsi_error_handler(void *data)
1621 * semaphores isn't unreasonable. 1613 * semaphores isn't unreasonable.
1622 */ 1614 */
1623 down_interruptible(&sem); 1615 down_interruptible(&sem);
1624 if (shost->eh_kill) 1616 if (kthread_should_stop())
1625 break; 1617 break;
1626 1618
1627 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler" 1619 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
@@ -1660,22 +1652,6 @@ int scsi_error_handler(void *data)
1660 * Make sure that nobody tries to wake us up again. 1652 * Make sure that nobody tries to wake us up again.
1661 */ 1653 */
1662 shost->eh_wait = NULL; 1654 shost->eh_wait = NULL;
1663
1664 /*
1665 * Knock this down too. From this point on, the host is flying
1666 * without a pilot. If this is because the module is being unloaded,
1667 * that's fine. If the user sent a signal to this thing, we are
1668 * potentially in real danger.
1669 */
1670 shost->eh_active = 0;
1671 shost->ehandler = NULL;
1672
1673 /*
1674 * If anyone is waiting for us to exit (i.e. someone trying to unload
1675 * a driver), then wake up that process to let them know we are on
1676 * the way out the door.
1677 */
1678 complete_and_exit(shost->eh_notify, 0);
1679 return 0; 1655 return 0;
1680} 1656}
1681 1657
@@ -1846,12 +1822,16 @@ EXPORT_SYMBOL(scsi_reset_provider);
1846int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, 1822int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1847 struct scsi_sense_hdr *sshdr) 1823 struct scsi_sense_hdr *sshdr)
1848{ 1824{
1849 if (!sense_buffer || !sb_len || (sense_buffer[0] & 0x70) != 0x70) 1825 if (!sense_buffer || !sb_len)
1850 return 0; 1826 return 0;
1851 1827
1852 memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); 1828 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1853 1829
1854 sshdr->response_code = (sense_buffer[0] & 0x7f); 1830 sshdr->response_code = (sense_buffer[0] & 0x7f);
1831
1832 if (!scsi_sense_valid(sshdr))
1833 return 0;
1834
1855 if (sshdr->response_code >= 0x72) { 1835 if (sshdr->response_code >= 0x72) {
1856 /* 1836 /*
1857 * descriptor format 1837 * descriptor format
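
Several of the scsi_error.c removals above (daemonize(), eh_notify, eh_kill) are one half of a conversion of the error-handler thread to the kthread API, matched by the new <linux/kthread.h> include and the kthread_should_stop() test in the main loop; the creation and shost->ehandler bookkeeping move to the host setup code, which is not part of this section. A hedged sketch of what that creating side typically looks like, with the helper name invented for illustration:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <scsi/scsi_host.h>

    /* scsi_error_handler() is the thread function shown above,
     * declared for the SCSI core in scsi_priv.h */
    static int example_start_eh_thread(struct Scsi_Host *shost)
    {
        struct task_struct *t;

        t = kthread_run(scsi_error_handler, shost,
                        "scsi_eh_%d", shost->host_no);
        if (IS_ERR(t))
            return PTR_ERR(t);

        shost->ehandler = t;
        return 0;
    }

The handler's while (1) loop above then exits once something calls kthread_stop() on the task, which is what makes kthread_should_stop() return true.
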
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 7a6b530115ac..b7fddac81347 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -30,20 +30,20 @@
30 30
31#define MAX_BUF PAGE_SIZE 31#define MAX_BUF PAGE_SIZE
32 32
33/* 33/**
34 * If we are told to probe a host, we will return 0 if the host is not 34 * ioctl_probe -- return host identification
35 * present, 1 if the host is present, and will return an identifying 35 * @host: host to identify
36 * string at *arg, if arg is non null, filling to the length stored at 36 * @buffer: userspace buffer for identification
37 * (int *) arg 37 *
38 * Return an identifying string at @buffer, if @buffer is non-NULL, filling
39 * to the length stored at * (int *) @buffer.
38 */ 40 */
39
40static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) 41static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
41{ 42{
42 unsigned int len, slen; 43 unsigned int len, slen;
43 const char *string; 44 const char *string;
44 int temp = host->hostt->present;
45 45
46 if (temp && buffer) { 46 if (buffer) {
47 if (get_user(len, (unsigned int __user *) buffer)) 47 if (get_user(len, (unsigned int __user *) buffer))
48 return -EFAULT; 48 return -EFAULT;
49 49
@@ -59,7 +59,7 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
59 return -EFAULT; 59 return -EFAULT;
60 } 60 }
61 } 61 }
62 return temp; 62 return 1;
63} 63}
64 64
65/* 65/*
@@ -88,25 +88,18 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
88static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, 88static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
89 int timeout, int retries) 89 int timeout, int retries)
90{ 90{
91 struct scsi_request *sreq;
92 int result; 91 int result;
93 struct scsi_sense_hdr sshdr; 92 struct scsi_sense_hdr sshdr;
94 93
95 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd)); 94 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
96 95
97 sreq = scsi_allocate_request(sdev, GFP_KERNEL); 96 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
98 if (!sreq) { 97 &sshdr, timeout, retries);
99 printk(KERN_WARNING "SCSI internal ioctl failed, no memory\n");
100 return -ENOMEM;
101 }
102
103 sreq->sr_data_direction = DMA_NONE;
104 scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
105 98
106 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", sreq->sr_result)); 99 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
107 100
108 if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && 101 if ((driver_byte(result) & DRIVER_SENSE) &&
109 (scsi_request_normalize_sense(sreq, &sshdr))) { 102 (scsi_sense_valid(&sshdr))) {
110 switch (sshdr.sense_key) { 103 switch (sshdr.sense_key) {
111 case ILLEGAL_REQUEST: 104 case ILLEGAL_REQUEST:
112 if (cmd[0] == ALLOW_MEDIUM_REMOVAL) 105 if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
@@ -125,7 +118,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
125 case UNIT_ATTENTION: 118 case UNIT_ATTENTION:
126 if (sdev->removable) { 119 if (sdev->removable) {
127 sdev->changed = 1; 120 sdev->changed = 1;
128 sreq->sr_result = 0; /* This is no longer considered an error */ 121 result = 0; /* This is no longer considered an error */
129 break; 122 break;
130 } 123 }
131 default: /* Fall through for non-removable media */ 124 default: /* Fall through for non-removable media */
@@ -135,15 +128,13 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
135 sdev->channel, 128 sdev->channel,
136 sdev->id, 129 sdev->id,
137 sdev->lun, 130 sdev->lun,
138 sreq->sr_result); 131 result);
139 scsi_print_req_sense(" ", sreq); 132 scsi_print_sense_hdr(" ", &sshdr);
140 break; 133 break;
141 } 134 }
142 } 135 }
143 136
144 result = sreq->sr_result;
145 SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n")); 137 SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n"));
146 scsi_release_request(sreq);
147 return result; 138 return result;
148} 139}
149 140
@@ -208,8 +199,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
208{ 199{
209 char *buf; 200 char *buf;
210 unsigned char cmd[MAX_COMMAND_SIZE]; 201 unsigned char cmd[MAX_COMMAND_SIZE];
202 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
211 char __user *cmd_in; 203 char __user *cmd_in;
212 struct scsi_request *sreq;
213 unsigned char opcode; 204 unsigned char opcode;
214 unsigned int inlen, outlen, cmdlen; 205 unsigned int inlen, outlen, cmdlen;
215 unsigned int needed, buf_needed; 206 unsigned int needed, buf_needed;
@@ -321,31 +312,23 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
321 break; 312 break;
322 } 313 }
323 314
324 sreq = scsi_allocate_request(sdev, GFP_KERNEL); 315 result = scsi_execute(sdev, cmd, data_direction, buf, needed,
325 if (!sreq) { 316 sense, timeout, retries, 0);
326 result = -EINTR;
327 goto error;
328 }
329
330 sreq->sr_data_direction = data_direction;
331 scsi_wait_req(sreq, cmd, buf, needed, timeout, retries);
332 317
333 /* 318 /*
334 * If there was an error condition, pass the info back to the user. 319 * If there was an error condition, pass the info back to the user.
335 */ 320 */
336 result = sreq->sr_result;
337 if (result) { 321 if (result) {
338 int sb_len = sizeof(sreq->sr_sense_buffer); 322 int sb_len = sizeof(*sense);
339 323
340 sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len; 324 sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len;
341 if (copy_to_user(cmd_in, sreq->sr_sense_buffer, sb_len)) 325 if (copy_to_user(cmd_in, sense, sb_len))
342 result = -EFAULT; 326 result = -EFAULT;
343 } else { 327 } else {
344 if (copy_to_user(cmd_in, buf, outlen)) 328 if (copy_to_user(cmd_in, buf, outlen))
345 result = -EFAULT; 329 result = -EFAULT;
346 } 330 }
347 331
348 scsi_release_request(sreq);
349error: 332error:
350 kfree(buf); 333 kfree(buf);
351 return result; 334 return result;
@@ -475,8 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
475 * error processing, as long as the device was opened 458 * error processing, as long as the device was opened
476 * non-blocking */ 459 * non-blocking */
477 if (filp && filp->f_flags & O_NONBLOCK) { 460 if (filp && filp->f_flags & O_NONBLOCK) {
478 if (test_bit(SHOST_RECOVERY, 461 if (sdev->host->shost_state == SHOST_RECOVERY)
479 &sdev->host->shost_state))
480 return -ENODEV; 462 return -ENODEV;
481 } else if (!scsi_block_when_processing_errors(sdev)) 463 } else if (!scsi_block_when_processing_errors(sdev))
482 return -ENODEV; 464 return -ENODEV;
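
Both scsi_ioctl.c conversions above drop the scsi_request/scsi_wait_req dance in favour of the helpers added in scsi_lib.c further down: scsi_execute() when the caller wants the raw sense bytes (as the send-command ioctl does, since it copies them to userspace) and scsi_execute_req() when a decoded scsi_sense_hdr is enough. A minimal usage sketch of the raw-sense variant, with the opcode, timeout and retry count chosen arbitrarily for the example:

    #include <linux/dma-mapping.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>

    /* send a non-data 6-byte command and keep the raw sense data */
    static int example_send_simple_cmd(struct scsi_device *sdev,
                                       unsigned char opcode)
    {
        unsigned char cmd[MAX_COMMAND_SIZE] = { opcode, };
        unsigned char sense[SCSI_SENSE_BUFFERSIZE];
        int result;

        result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0,
                              sense, 10 * HZ, 5, 0);

        if (driver_byte(result) & DRIVER_SENSE) {
            /* sense[] now holds the raw sense bytes for the caller
             * to inspect or copy out, as the ioctl path above does */
        }

        return result;
    }
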
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7a91ca3d32a6..77f2d444f7e0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -232,23 +232,6 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
232} 232}
233EXPORT_SYMBOL(scsi_do_req); 233EXPORT_SYMBOL(scsi_do_req);
234 234
235static void scsi_wait_done(struct scsi_cmnd *cmd)
236{
237 struct request *req = cmd->request;
238 struct request_queue *q = cmd->device->request_queue;
239 unsigned long flags;
240
241 req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
242
243 spin_lock_irqsave(q->queue_lock, flags);
244 if (blk_rq_tagged(req))
245 blk_queue_end_tag(q, req);
246 spin_unlock_irqrestore(q->queue_lock, flags);
247
248 if (req->waiting)
249 complete(req->waiting);
250}
251
252/* This is the end routine we get to if a command was never attached 235/* This is the end routine we get to if a command was never attached
253 * to the request. Simply complete the request without changing 236 * to the request. Simply complete the request without changing
254 * rq_status; this will cause a DRIVER_ERROR. */ 237 * rq_status; this will cause a DRIVER_ERROR. */
@@ -263,21 +246,114 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
263 unsigned bufflen, int timeout, int retries) 246 unsigned bufflen, int timeout, int retries)
264{ 247{
265 DECLARE_COMPLETION(wait); 248 DECLARE_COMPLETION(wait);
266 249 int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
267 sreq->sr_request->waiting = &wait; 250 struct request *req;
268 sreq->sr_request->rq_status = RQ_SCSI_BUSY; 251
269 sreq->sr_request->end_io = scsi_wait_req_end_io; 252 req = blk_get_request(sreq->sr_device->request_queue, write,
270 scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done, 253 __GFP_WAIT);
271 timeout, retries); 254 if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
255 buffer, bufflen, __GFP_WAIT)) {
256 sreq->sr_result = DRIVER_ERROR << 24;
257 blk_put_request(req);
258 return;
259 }
260
261 req->flags |= REQ_NOMERGE;
262 req->waiting = &wait;
263 req->end_io = scsi_wait_req_end_io;
264 req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
265 req->sense = sreq->sr_sense_buffer;
266 req->sense_len = 0;
267 memcpy(req->cmd, cmnd, req->cmd_len);
268 req->timeout = timeout;
269 req->flags |= REQ_BLOCK_PC;
270 req->rq_disk = NULL;
271 blk_insert_request(sreq->sr_device->request_queue, req,
272 sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
272 wait_for_completion(&wait); 273 wait_for_completion(&wait);
273 sreq->sr_request->waiting = NULL; 274 sreq->sr_request->waiting = NULL;
274 if (sreq->sr_request->rq_status != RQ_SCSI_DONE) 275 sreq->sr_result = req->errors;
276 if (req->errors)
275 sreq->sr_result |= (DRIVER_ERROR << 24); 277 sreq->sr_result |= (DRIVER_ERROR << 24);
276 278
277 __scsi_release_request(sreq); 279 blk_put_request(req);
278} 280}
281
279EXPORT_SYMBOL(scsi_wait_req); 282EXPORT_SYMBOL(scsi_wait_req);
280 283
284/**
285 * scsi_execute - insert request and wait for the result
286 * @sdev: scsi device
287 * @cmd: scsi command
288 * @data_direction: data direction
289 * @buffer: data buffer
290 * @bufflen: len of buffer
291 * @sense: optional sense buffer
292 * @timeout: request timeout in seconds
293 * @retries: number of times to retry request
294 * @flags: or into request flags;
295 *
 296 * returns the req->errors value which is the scsi_cmnd result
297 * field.
298 **/
299int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
300 int data_direction, void *buffer, unsigned bufflen,
301 unsigned char *sense, int timeout, int retries, int flags)
302{
303 struct request *req;
304 int write = (data_direction == DMA_TO_DEVICE);
305 int ret = DRIVER_ERROR << 24;
306
307 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
308
309 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
310 buffer, bufflen, __GFP_WAIT))
311 goto out;
312
313 req->cmd_len = COMMAND_SIZE(cmd[0]);
314 memcpy(req->cmd, cmd, req->cmd_len);
315 req->sense = sense;
316 req->sense_len = 0;
317 req->timeout = timeout;
318 req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
319
320 /*
321 * head injection *required* here otherwise quiesce won't work
322 */
323 blk_execute_rq(req->q, NULL, req, 1);
324
325 ret = req->errors;
326 out:
327 blk_put_request(req);
328
329 return ret;
330}
331EXPORT_SYMBOL(scsi_execute);
332
333
334int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
335 int data_direction, void *buffer, unsigned bufflen,
336 struct scsi_sense_hdr *sshdr, int timeout, int retries)
337{
338 char *sense = NULL;
339 int result;
340
341 if (sshdr) {
342 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
343 if (!sense)
344 return DRIVER_ERROR << 24;
345 memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
346 }
347 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
348 sense, timeout, retries, 0);
349 if (sshdr)
350 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
351
352 kfree(sense);
353 return result;
354}
355EXPORT_SYMBOL(scsi_execute_req);
356
281/* 357/*
282 * Function: scsi_init_cmd_errh() 358 * Function: scsi_init_cmd_errh()
283 * 359 *
@@ -348,7 +424,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
348 424
349 spin_lock_irqsave(shost->host_lock, flags); 425 spin_lock_irqsave(shost->host_lock, flags);
350 shost->host_busy--; 426 shost->host_busy--;
351 if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) && 427 if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
352 shost->host_failed)) 428 shost->host_failed))
353 scsi_eh_wakeup(shost); 429 scsi_eh_wakeup(shost);
354 spin_unlock(shost->host_lock); 430 spin_unlock(shost->host_lock);
@@ -851,17 +927,20 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
851 scsi_requeue_command(q, cmd); 927 scsi_requeue_command(q, cmd);
852 return; 928 return;
853 } 929 }
854 printk(KERN_INFO "Device %s not ready.\n", 930 if (!(req->flags & REQ_QUIET))
855 req->rq_disk ? req->rq_disk->disk_name : ""); 931 dev_printk(KERN_INFO,
932 &cmd->device->sdev_gendev,
933 "Device not ready.\n");
856 cmd = scsi_end_request(cmd, 0, this_count, 1); 934 cmd = scsi_end_request(cmd, 0, this_count, 1);
857 return; 935 return;
858 case VOLUME_OVERFLOW: 936 case VOLUME_OVERFLOW:
859 printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ", 937 if (!(req->flags & REQ_QUIET)) {
860 cmd->device->host->host_no, 938 dev_printk(KERN_INFO,
861 (int)cmd->device->channel, 939 &cmd->device->sdev_gendev,
862 (int)cmd->device->id, (int)cmd->device->lun); 940 "Volume overflow, CDB: ");
863 __scsi_print_command(cmd->data_cmnd); 941 __scsi_print_command(cmd->data_cmnd);
864 scsi_print_sense("", cmd); 942 scsi_print_sense("", cmd);
943 }
865 cmd = scsi_end_request(cmd, 0, block_bytes, 1); 944 cmd = scsi_end_request(cmd, 0, block_bytes, 1);
866 return; 945 return;
867 default: 946 default:
@@ -878,14 +957,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
878 return; 957 return;
879 } 958 }
880 if (result) { 959 if (result) {
881 printk(KERN_INFO "SCSI error : <%d %d %d %d> return code " 960 if (!(req->flags & REQ_QUIET)) {
882 "= 0x%x\n", cmd->device->host->host_no, 961 dev_printk(KERN_INFO, &cmd->device->sdev_gendev,
883 cmd->device->channel, 962 "SCSI error: return code = 0x%x\n", result);
884 cmd->device->id, 963
885 cmd->device->lun, result); 964 if (driver_byte(result) & DRIVER_SENSE)
886 965 scsi_print_sense("", cmd);
887 if (driver_byte(result) & DRIVER_SENSE) 966 }
888 scsi_print_sense("", cmd);
889 /* 967 /*
890 * Mark a single buffer as not uptodate. Queue the remainder. 968 * Mark a single buffer as not uptodate. Queue the remainder.
891 * We sometimes get this cruft in the event that a medium error 969 * We sometimes get this cruft in the event that a medium error
@@ -1020,6 +1098,12 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1020 return -EOPNOTSUPP; 1098 return -EOPNOTSUPP;
1021} 1099}
1022 1100
1101static void scsi_generic_done(struct scsi_cmnd *cmd)
1102{
1103 BUG_ON(!blk_pc_request(cmd->request));
1104 scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
1105}
1106
1023static int scsi_prep_fn(struct request_queue *q, struct request *req) 1107static int scsi_prep_fn(struct request_queue *q, struct request *req)
1024{ 1108{
1025 struct scsi_device *sdev = q->queuedata; 1109 struct scsi_device *sdev = q->queuedata;
@@ -1061,7 +1145,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1061 * these two cases differently. We differentiate by looking 1145 * these two cases differently. We differentiate by looking
1062 * at request->cmd, as this tells us the real story. 1146 * at request->cmd, as this tells us the real story.
1063 */ 1147 */
1064 if (req->flags & REQ_SPECIAL) { 1148 if (req->flags & REQ_SPECIAL && req->special) {
1065 struct scsi_request *sreq = req->special; 1149 struct scsi_request *sreq = req->special;
1066 1150
1067 if (sreq->sr_magic == SCSI_REQ_MAGIC) { 1151 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
@@ -1073,7 +1157,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1073 cmd = req->special; 1157 cmd = req->special;
1074 } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { 1158 } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1075 1159
1076 if(unlikely(specials_only)) { 1160 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1077 if(specials_only == SDEV_QUIESCE || 1161 if(specials_only == SDEV_QUIESCE ||
1078 specials_only == SDEV_BLOCK) 1162 specials_only == SDEV_BLOCK)
1079 return BLKPREP_DEFER; 1163 return BLKPREP_DEFER;
@@ -1142,11 +1226,26 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1142 /* 1226 /*
1143 * Initialize the actual SCSI command for this request. 1227 * Initialize the actual SCSI command for this request.
1144 */ 1228 */
1145 drv = *(struct scsi_driver **)req->rq_disk->private_data; 1229 if (req->rq_disk) {
1146 if (unlikely(!drv->init_command(cmd))) { 1230 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1147 scsi_release_buffers(cmd); 1231 if (unlikely(!drv->init_command(cmd))) {
1148 scsi_put_command(cmd); 1232 scsi_release_buffers(cmd);
1149 return BLKPREP_KILL; 1233 scsi_put_command(cmd);
1234 return BLKPREP_KILL;
1235 }
1236 } else {
1237 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1238 if (rq_data_dir(req) == WRITE)
1239 cmd->sc_data_direction = DMA_TO_DEVICE;
1240 else if (req->data_len)
1241 cmd->sc_data_direction = DMA_FROM_DEVICE;
1242 else
1243 cmd->sc_data_direction = DMA_NONE;
1244
1245 cmd->transfersize = req->data_len;
1246 cmd->allowed = 3;
1247 cmd->timeout_per_command = req->timeout;
1248 cmd->done = scsi_generic_done;
1150 } 1249 }
1151 } 1250 }
1152 1251
@@ -1207,7 +1306,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1207 struct Scsi_Host *shost, 1306 struct Scsi_Host *shost,
1208 struct scsi_device *sdev) 1307 struct scsi_device *sdev)
1209{ 1308{
1210 if (test_bit(SHOST_RECOVERY, &shost->shost_state)) 1309 if (shost->shost_state == SHOST_RECOVERY)
1211 return 0; 1310 return 0;
1212 if (shost->host_busy == 0 && shost->host_blocked) { 1311 if (shost->host_busy == 0 && shost->host_blocked) {
1213 /* 1312 /*
@@ -1539,9 +1638,9 @@ void scsi_exit_queue(void)
1539 } 1638 }
1540} 1639}
1541/** 1640/**
1542 * __scsi_mode_sense - issue a mode sense, falling back from 10 to 1641 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1543 * six bytes if necessary. 1642 * six bytes if necessary.
1544 * @sreq: SCSI request to fill in with the MODE_SENSE 1643 * @sdev: SCSI device to be queried
1545 * @dbd: set if mode sense will allow block descriptors to be returned 1644 * @dbd: set if mode sense will allow block descriptors to be returned
1546 * @modepage: mode page being requested 1645 * @modepage: mode page being requested
1547 * @buffer: request buffer (may not be smaller than eight bytes) 1646 * @buffer: request buffer (may not be smaller than eight bytes)
@@ -1549,26 +1648,34 @@ void scsi_exit_queue(void)
1549 * @timeout: command timeout 1648 * @timeout: command timeout
1550 * @retries: number of retries before failing 1649 * @retries: number of retries before failing
1551 * @data: returns a structure abstracting the mode header data 1650 * @data: returns a structure abstracting the mode header data
1651 * @sense: place to put sense data (or NULL if no sense to be collected).
1652 * must be SCSI_SENSE_BUFFERSIZE big.
1552 * 1653 *
1553 * Returns zero if unsuccessful, or the header offset (either 4 1654 * Returns zero if unsuccessful, or the header offset (either 4
1554 * or 8 depending on whether a six or ten byte command was 1655 * or 8 depending on whether a six or ten byte command was
1555 * issued) if successful. 1656 * issued) if successful.
1556 **/ 1657 **/
1557int 1658int
1558__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage, 1659scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1559 unsigned char *buffer, int len, int timeout, int retries, 1660 unsigned char *buffer, int len, int timeout, int retries,
1560 struct scsi_mode_data *data) { 1661 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1561 unsigned char cmd[12]; 1662 unsigned char cmd[12];
1562 int use_10_for_ms; 1663 int use_10_for_ms;
1563 int header_length; 1664 int header_length;
1665 int result;
1666 struct scsi_sense_hdr my_sshdr;
1564 1667
1565 memset(data, 0, sizeof(*data)); 1668 memset(data, 0, sizeof(*data));
1566 memset(&cmd[0], 0, 12); 1669 memset(&cmd[0], 0, 12);
1567 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1670 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1568 cmd[2] = modepage; 1671 cmd[2] = modepage;
1569 1672
1673 /* caller might not be interested in sense, but we need it */
1674 if (!sshdr)
1675 sshdr = &my_sshdr;
1676
1570 retry: 1677 retry:
1571 use_10_for_ms = sreq->sr_device->use_10_for_ms; 1678 use_10_for_ms = sdev->use_10_for_ms;
1572 1679
1573 if (use_10_for_ms) { 1680 if (use_10_for_ms) {
1574 if (len < 8) 1681 if (len < 8)
@@ -1586,36 +1693,31 @@ __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1586 header_length = 4; 1693 header_length = 4;
1587 } 1694 }
1588 1695
1589 sreq->sr_cmd_len = 0;
1590 memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
1591 sreq->sr_data_direction = DMA_FROM_DEVICE;
1592
1593 memset(buffer, 0, len); 1696 memset(buffer, 0, len);
1594 1697
1595 scsi_wait_req(sreq, cmd, buffer, len, timeout, retries); 1698 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1699 sshdr, timeout, retries);
1596 1700
1597 /* This code looks awful: what it's doing is making sure an 1701 /* This code looks awful: what it's doing is making sure an
1598 * ILLEGAL REQUEST sense return identifies the actual command 1702 * ILLEGAL REQUEST sense return identifies the actual command
1599 * byte as the problem. MODE_SENSE commands can return 1703 * byte as the problem. MODE_SENSE commands can return
1600 * ILLEGAL REQUEST if the code page isn't supported */ 1704 * ILLEGAL REQUEST if the code page isn't supported */
1601 1705
1602 if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) && 1706 if (use_10_for_ms && !scsi_status_is_good(result) &&
1603 (driver_byte(sreq->sr_result) & DRIVER_SENSE)) { 1707 (driver_byte(result) & DRIVER_SENSE)) {
1604 struct scsi_sense_hdr sshdr; 1708 if (scsi_sense_valid(sshdr)) {
1605 1709 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1606 if (scsi_request_normalize_sense(sreq, &sshdr)) { 1710 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1607 if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1608 (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1609 /* 1711 /*
1610 * Invalid command operation code 1712 * Invalid command operation code
1611 */ 1713 */
1612 sreq->sr_device->use_10_for_ms = 0; 1714 sdev->use_10_for_ms = 0;
1613 goto retry; 1715 goto retry;
1614 } 1716 }
1615 } 1717 }
1616 } 1718 }
1617 1719
1618 if(scsi_status_is_good(sreq->sr_result)) { 1720 if(scsi_status_is_good(result)) {
1619 data->header_length = header_length; 1721 data->header_length = header_length;
1620 if(use_10_for_ms) { 1722 if(use_10_for_ms) {
1621 data->length = buffer[0]*256 + buffer[1] + 2; 1723 data->length = buffer[0]*256 + buffer[1] + 2;
@@ -1632,73 +1734,31 @@ __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1632 } 1734 }
1633 } 1735 }
1634 1736
1635 return sreq->sr_result; 1737 return result;
1636}
1637EXPORT_SYMBOL(__scsi_mode_sense);
1638
1639/**
1640 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1641 * six bytes if necessary.
1642 * @sdev: scsi device to send command to.
1643 * @dbd: set if mode sense will disable block descriptors in the return
1644 * @modepage: mode page being requested
1645 * @buffer: request buffer (may not be smaller than eight bytes)
1646 * @len: length of request buffer.
1647 * @timeout: command timeout
1648 * @retries: number of retries before failing
1649 *
1650 * Returns zero if unsuccessful, or the header offset (either 4
1651 * or 8 depending on whether a six or ten byte command was
1652 * issued) if successful.
1653 **/
1654int
1655scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1656 unsigned char *buffer, int len, int timeout, int retries,
1657 struct scsi_mode_data *data)
1658{
1659 struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1660 int ret;
1661
1662 if (!sreq)
1663 return -1;
1664
1665 ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1666 timeout, retries, data);
1667
1668 scsi_release_request(sreq);
1669
1670 return ret;
1671} 1738}
1672EXPORT_SYMBOL(scsi_mode_sense); 1739EXPORT_SYMBOL(scsi_mode_sense);
1673 1740
1674int 1741int
1675scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) 1742scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1676{ 1743{
1677 struct scsi_request *sreq;
1678 char cmd[] = { 1744 char cmd[] = {
1679 TEST_UNIT_READY, 0, 0, 0, 0, 0, 1745 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1680 }; 1746 };
1747 struct scsi_sense_hdr sshdr;
1681 int result; 1748 int result;
1682 1749
1683 sreq = scsi_allocate_request(sdev, GFP_KERNEL); 1750 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1684 if (!sreq) 1751 timeout, retries);
1685 return -ENOMEM;
1686
1687 sreq->sr_data_direction = DMA_NONE;
1688 scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
1689 1752
1690 if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) { 1753 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1691 struct scsi_sense_hdr sshdr;
1692 1754
1693 if ((scsi_request_normalize_sense(sreq, &sshdr)) && 1755 if ((scsi_sense_valid(&sshdr)) &&
1694 ((sshdr.sense_key == UNIT_ATTENTION) || 1756 ((sshdr.sense_key == UNIT_ATTENTION) ||
1695 (sshdr.sense_key == NOT_READY))) { 1757 (sshdr.sense_key == NOT_READY))) {
1696 sdev->changed = 1; 1758 sdev->changed = 1;
1697 sreq->sr_result = 0; 1759 result = 0;
1698 } 1760 }
1699 } 1761 }
1700 result = sreq->sr_result;
1701 scsi_release_request(sreq);
1702 return result; 1762 return result;
1703} 1763}
1704EXPORT_SYMBOL(scsi_test_unit_ready); 1764EXPORT_SYMBOL(scsi_test_unit_ready);
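
With the __scsi_mode_sense()/scsi_mode_sense() pair collapsed into one function above, callers pass the scsi_device directly and may supply a scsi_sense_hdr instead of juggling a scsi_request. A hedged example of calling the new signature (mode page, buffer size, timeout and retries are arbitrary; the exact header that declares struct scsi_mode_data may differ in this kernel, so treat the includes as approximate):

    #include <scsi/scsi.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_eh.h>

    /* illustrative: fetch the caching mode page (0x08) with the new API */
    static int example_read_caching_page(struct scsi_device *sdev)
    {
        unsigned char buffer[128];
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        int res;

        res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* page */,
                              buffer, sizeof(buffer), 10 * HZ, 3,
                              &data, &sshdr);
        if (!scsi_status_is_good(res))
            return -EIO;

        /* data.header_length gives the offset of the mode page data */
        return data.header_length;
    }
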
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d30d7f4e63ec..ee6de1768e53 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -63,6 +63,9 @@ extern int __init scsi_init_devinfo(void);
63extern void scsi_exit_devinfo(void); 63extern void scsi_exit_devinfo(void);
64 64
65/* scsi_error.c */ 65/* scsi_error.c */
66extern void scsi_add_timer(struct scsi_cmnd *, int,
67 void (*)(struct scsi_cmnd *));
68extern int scsi_delete_timer(struct scsi_cmnd *);
66extern void scsi_times_out(struct scsi_cmnd *cmd); 69extern void scsi_times_out(struct scsi_cmnd *cmd);
67extern int scsi_error_handler(void *host); 70extern int scsi_error_handler(void *host);
68extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 71extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 48edd67982a5..19c9a232a754 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -111,15 +111,14 @@ MODULE_PARM_DESC(inq_timeout,
111 111
112/** 112/**
113 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 113 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
114 * @sreq: used to send the command 114 * @sdev: scsi device to send command to
115 * @result: area to store the result of the MODE SENSE 115 * @result: area to store the result of the MODE SENSE
116 * 116 *
117 * Description: 117 * Description:
118 * Send a vendor specific MODE SENSE (not a MODE SELECT) command using 118 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
119 * @sreq to unlock a device, storing the (unused) results into result.
120 * Called for BLIST_KEY devices. 119 * Called for BLIST_KEY devices.
121 **/ 120 **/
122static void scsi_unlock_floptical(struct scsi_request *sreq, 121static void scsi_unlock_floptical(struct scsi_device *sdev,
123 unsigned char *result) 122 unsigned char *result)
124{ 123{
125 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 124 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -129,11 +128,10 @@ static void scsi_unlock_floptical(struct scsi_request *sreq,
129 scsi_cmd[1] = 0; 128 scsi_cmd[1] = 0;
130 scsi_cmd[2] = 0x2e; 129 scsi_cmd[2] = 0x2e;
131 scsi_cmd[3] = 0; 130 scsi_cmd[3] = 0;
132 scsi_cmd[4] = 0x2a; /* size */ 131 scsi_cmd[4] = 0x2a; /* size */
133 scsi_cmd[5] = 0; 132 scsi_cmd[5] = 0;
134 sreq->sr_cmd_len = 0; 133 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
135 sreq->sr_data_direction = DMA_FROM_DEVICE; 134 SCSI_TIMEOUT, 3);
136 scsi_wait_req(sreq, scsi_cmd, result, 0x2a /* size */, SCSI_TIMEOUT, 3);
137} 135}
138 136
139/** 137/**
@@ -433,26 +431,25 @@ void scsi_target_reap(struct scsi_target *starget)
433 431
434/** 432/**
435 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 433 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
436 * @sreq: used to send the INQUIRY 434 * @sdev: scsi_device to probe
437 * @inq_result: area to store the INQUIRY result 435 * @inq_result: area to store the INQUIRY result
436 * @result_len: len of inq_result
438 * @bflags: store any bflags found here 437 * @bflags: store any bflags found here
439 * 438 *
440 * Description: 439 * Description:
441 * Probe the lun associated with @sreq using a standard SCSI INQUIRY; 440 * Probe the lun associated with @sdev using a standard SCSI INQUIRY;
442 * 441 *
443 * If the INQUIRY is successful, sreq->sr_result is zero and: the 442 * If the INQUIRY is successful, zero is returned and the
444 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 443 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
445 * are copied to the Scsi_Device at @sreq->sr_device (sdev); 444 * are copied to the Scsi_Device; any flags value is stored in *@bflags.
446 * any flags value is stored in *@bflags.
447 **/ 445 **/
448static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result, 446static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
449 int *bflags) 447 int result_len, int *bflags)
450{ 448{
451 struct scsi_device *sdev = sreq->sr_device; /* a bit ugly */
452 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 449 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
453 int first_inquiry_len, try_inquiry_len, next_inquiry_len; 450 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
454 int response_len = 0; 451 int response_len = 0;
455 int pass, count; 452 int pass, count, result;
456 struct scsi_sense_hdr sshdr; 453 struct scsi_sense_hdr sshdr;
457 454
458 *bflags = 0; 455 *bflags = 0;
@@ -475,28 +472,26 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
475 memset(scsi_cmd, 0, 6); 472 memset(scsi_cmd, 0, 6);
476 scsi_cmd[0] = INQUIRY; 473 scsi_cmd[0] = INQUIRY;
477 scsi_cmd[4] = (unsigned char) try_inquiry_len; 474 scsi_cmd[4] = (unsigned char) try_inquiry_len;
478 sreq->sr_cmd_len = 0;
479 sreq->sr_data_direction = DMA_FROM_DEVICE;
480 475
481 memset(inq_result, 0, try_inquiry_len); 476 memset(inq_result, 0, try_inquiry_len);
482 scsi_wait_req(sreq, (void *) scsi_cmd, (void *) inq_result, 477
483 try_inquiry_len, 478 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
484 HZ/2 + HZ*scsi_inq_timeout, 3); 479 inq_result, try_inquiry_len, &sshdr,
480 HZ / 2 + HZ * scsi_inq_timeout, 3);
485 481
486 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " 482 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
487 "with code 0x%x\n", 483 "with code 0x%x\n",
488 sreq->sr_result ? "failed" : "successful", 484 result ? "failed" : "successful", result));
489 sreq->sr_result));
490 485
491 if (sreq->sr_result) { 486 if (result) {
492 /* 487 /*
493 * not-ready to ready transition [asc/ascq=0x28/0x0] 488 * not-ready to ready transition [asc/ascq=0x28/0x0]
494 * or power-on, reset [asc/ascq=0x29/0x0], continue. 489 * or power-on, reset [asc/ascq=0x29/0x0], continue.
495 * INQUIRY should not yield UNIT_ATTENTION 490 * INQUIRY should not yield UNIT_ATTENTION
496 * but many buggy devices do so anyway. 491 * but many buggy devices do so anyway.
497 */ 492 */
498 if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && 493 if ((driver_byte(result) & DRIVER_SENSE) &&
499 scsi_request_normalize_sense(sreq, &sshdr)) { 494 scsi_sense_valid(&sshdr)) {
500 if ((sshdr.sense_key == UNIT_ATTENTION) && 495 if ((sshdr.sense_key == UNIT_ATTENTION) &&
501 ((sshdr.asc == 0x28) || 496 ((sshdr.asc == 0x28) ||
502 (sshdr.asc == 0x29)) && 497 (sshdr.asc == 0x29)) &&
@@ -507,7 +502,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
507 break; 502 break;
508 } 503 }
509 504
510 if (sreq->sr_result == 0) { 505 if (result == 0) {
511 response_len = (unsigned char) inq_result[4] + 5; 506 response_len = (unsigned char) inq_result[4] + 5;
512 if (response_len > 255) 507 if (response_len > 255)
513 response_len = first_inquiry_len; /* sanity */ 508 response_len = first_inquiry_len; /* sanity */
@@ -556,8 +551,8 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
556 551
557 /* If the last transfer attempt got an error, assume the 552 /* If the last transfer attempt got an error, assume the
558 * peripheral doesn't exist or is dead. */ 553 * peripheral doesn't exist or is dead. */
559 if (sreq->sr_result) 554 if (result)
560 return; 555 return -EIO;
561 556
562 /* Don't report any more data than the device says is valid */ 557 /* Don't report any more data than the device says is valid */
563 sdev->inquiry_len = min(try_inquiry_len, response_len); 558 sdev->inquiry_len = min(try_inquiry_len, response_len);
@@ -593,7 +588,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
593 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 588 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
594 sdev->scsi_level++; 589 sdev->scsi_level++;
595 590
596 return; 591 return 0;
597} 592}
598 593
599/** 594/**
@@ -800,9 +795,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
800 void *hostdata) 795 void *hostdata)
801{ 796{
802 struct scsi_device *sdev; 797 struct scsi_device *sdev;
803 struct scsi_request *sreq;
804 unsigned char *result; 798 unsigned char *result;
805 int bflags, res = SCSI_SCAN_NO_RESPONSE; 799 int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
806 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 800 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
807 801
808 /* 802 /*
@@ -831,16 +825,13 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
831 sdev = scsi_alloc_sdev(starget, lun, hostdata); 825 sdev = scsi_alloc_sdev(starget, lun, hostdata);
832 if (!sdev) 826 if (!sdev)
833 goto out; 827 goto out;
834 sreq = scsi_allocate_request(sdev, GFP_ATOMIC); 828
835 if (!sreq) 829 result = kmalloc(result_len, GFP_ATOMIC |
836 goto out_free_sdev;
837 result = kmalloc(256, GFP_ATOMIC |
838 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0)); 830 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
839 if (!result) 831 if (!result)
840 goto out_free_sreq; 832 goto out_free_sdev;
841 833
842 scsi_probe_lun(sreq, result, &bflags); 834 if (scsi_probe_lun(sdev, result, result_len, &bflags))
843 if (sreq->sr_result)
844 goto out_free_result; 835 goto out_free_result;
845 836
846 /* 837 /*
@@ -868,7 +859,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
868 if (res == SCSI_SCAN_LUN_PRESENT) { 859 if (res == SCSI_SCAN_LUN_PRESENT) {
869 if (bflags & BLIST_KEY) { 860 if (bflags & BLIST_KEY) {
870 sdev->lockable = 0; 861 sdev->lockable = 0;
871 scsi_unlock_floptical(sreq, result); 862 scsi_unlock_floptical(sdev, result);
872 } 863 }
873 if (bflagsp) 864 if (bflagsp)
874 *bflagsp = bflags; 865 *bflagsp = bflags;
@@ -876,8 +867,6 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
876 867
877 out_free_result: 868 out_free_result:
878 kfree(result); 869 kfree(result);
879 out_free_sreq:
880 scsi_release_request(sreq);
881 out_free_sdev: 870 out_free_sdev:
882 if (res == SCSI_SCAN_LUN_PRESENT) { 871 if (res == SCSI_SCAN_LUN_PRESENT) {
883 if (sdevp) { 872 if (sdevp) {
@@ -1070,8 +1059,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1070 unsigned int lun; 1059 unsigned int lun;
1071 unsigned int num_luns; 1060 unsigned int num_luns;
1072 unsigned int retries; 1061 unsigned int retries;
1062 int result;
1073 struct scsi_lun *lunp, *lun_data; 1063 struct scsi_lun *lunp, *lun_data;
1074 struct scsi_request *sreq;
1075 u8 *data; 1064 u8 *data;
1076 struct scsi_sense_hdr sshdr; 1065 struct scsi_sense_hdr sshdr;
1077 struct scsi_target *starget = scsi_target(sdev); 1066 struct scsi_target *starget = scsi_target(sdev);
@@ -1089,10 +1078,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1089 if (bflags & BLIST_NOLUN) 1078 if (bflags & BLIST_NOLUN)
1090 return 0; 1079 return 0;
1091 1080
1092 sreq = scsi_allocate_request(sdev, GFP_ATOMIC);
1093 if (!sreq)
1094 goto out;
1095
1096 sprintf(devname, "host %d channel %d id %d", 1081 sprintf(devname, "host %d channel %d id %d",
1097 sdev->host->host_no, sdev->channel, sdev->id); 1082 sdev->host->host_no, sdev->channel, sdev->id);
1098 1083
@@ -1110,7 +1095,7 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1110 lun_data = kmalloc(length, GFP_ATOMIC | 1095 lun_data = kmalloc(length, GFP_ATOMIC |
1111 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1096 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1112 if (!lun_data) 1097 if (!lun_data)
1113 goto out_release_request; 1098 goto out;
1114 1099
1115 scsi_cmd[0] = REPORT_LUNS; 1100 scsi_cmd[0] = REPORT_LUNS;
1116 1101
@@ -1129,8 +1114,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1129 1114
1130 scsi_cmd[10] = 0; /* reserved */ 1115 scsi_cmd[10] = 0; /* reserved */
1131 scsi_cmd[11] = 0; /* control */ 1116 scsi_cmd[11] = 0; /* control */
1132 sreq->sr_cmd_len = 0;
1133 sreq->sr_data_direction = DMA_FROM_DEVICE;
1134 1117
1135 /* 1118 /*
1136 * We can get a UNIT ATTENTION, for example a power on/reset, so 1119 * We can get a UNIT ATTENTION, for example a power on/reset, so
@@ -1146,29 +1129,29 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1146 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending" 1129 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending"
1147 " REPORT LUNS to %s (try %d)\n", devname, 1130 " REPORT LUNS to %s (try %d)\n", devname,
1148 retries)); 1131 retries));
1149 scsi_wait_req(sreq, scsi_cmd, lun_data, length, 1132
1150 SCSI_TIMEOUT + 4*HZ, 3); 1133 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1134 lun_data, length, &sshdr,
1135 SCSI_TIMEOUT + 4 * HZ, 3);
1136
1151 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" 1137 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
1152 " %s (try %d) result 0x%x\n", sreq->sr_result 1138 " %s (try %d) result 0x%x\n", result
1153 ? "failed" : "successful", retries, 1139 ? "failed" : "successful", retries, result));
1154 sreq->sr_result)); 1140 if (result == 0)
1155 if (sreq->sr_result == 0)
1156 break; 1141 break;
1157 else if (scsi_request_normalize_sense(sreq, &sshdr)) { 1142 else if (scsi_sense_valid(&sshdr)) {
1158 if (sshdr.sense_key != UNIT_ATTENTION) 1143 if (sshdr.sense_key != UNIT_ATTENTION)
1159 break; 1144 break;
1160 } 1145 }
1161 } 1146 }
1162 1147
1163 if (sreq->sr_result) { 1148 if (result) {
1164 /* 1149 /*
1165 * The device probably does not support a REPORT LUN command 1150 * The device probably does not support a REPORT LUN command
1166 */ 1151 */
1167 kfree(lun_data); 1152 kfree(lun_data);
1168 scsi_release_request(sreq);
1169 return 1; 1153 return 1;
1170 } 1154 }
1171 scsi_release_request(sreq);
1172 1155
1173 /* 1156 /*
1174 * Get the length from the first four bytes of lun_data. 1157 * Get the length from the first four bytes of lun_data.
@@ -1242,8 +1225,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1242 kfree(lun_data); 1225 kfree(lun_data);
1243 return 0; 1226 return 0;
1244 1227
1245 out_release_request:
1246 scsi_release_request(sreq);
1247 out: 1228 out:
1248 /* 1229 /*
1249 * We are out of memory, don't try scanning any further. 1230 * We are out of memory, don't try scanning any further.
@@ -1265,9 +1246,12 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1265 1246
1266 get_device(&starget->dev); 1247 get_device(&starget->dev);
1267 down(&shost->scan_mutex); 1248 down(&shost->scan_mutex);
1268 res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1249 if (scsi_host_scan_allowed(shost)) {
1269 if (res != SCSI_SCAN_LUN_PRESENT) 1250 res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1,
1270 sdev = ERR_PTR(-ENODEV); 1251 hostdata);
1252 if (res != SCSI_SCAN_LUN_PRESENT)
1253 sdev = ERR_PTR(-ENODEV);
1254 }
1271 up(&shost->scan_mutex); 1255 up(&shost->scan_mutex);
1272 scsi_target_reap(starget); 1256 scsi_target_reap(starget);
1273 put_device(&starget->dev); 1257 put_device(&starget->dev);
@@ -1417,11 +1401,15 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1417 return -EINVAL; 1401 return -EINVAL;
1418 1402
1419 down(&shost->scan_mutex); 1403 down(&shost->scan_mutex);
1420 if (channel == SCAN_WILD_CARD) 1404 if (scsi_host_scan_allowed(shost)) {
1421 for (channel = 0; channel <= shost->max_channel; channel++) 1405 if (channel == SCAN_WILD_CARD)
1406 for (channel = 0; channel <= shost->max_channel;
1407 channel++)
1408 scsi_scan_channel(shost, channel, id, lun,
1409 rescan);
1410 else
1422 scsi_scan_channel(shost, channel, id, lun, rescan); 1411 scsi_scan_channel(shost, channel, id, lun, rescan);
1423 else 1412 }
1424 scsi_scan_channel(shost, channel, id, lun, rescan);
1425 up(&shost->scan_mutex); 1413 up(&shost->scan_mutex);
1426 1414
1427 return 0; 1415 return 0;
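As a companion to the scan-path conversion above, here is a minimal sketch of the probe pattern the new scsi_probe_lun() follows: issue INQUIRY through scsi_execute_req() and retry only on the unit-attention codes that buggy devices report around a reset. The function name and the fixed three-pass loop are illustrative, not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

/* Illustrative INQUIRY probe in the style of the converted scsi_probe_lun(). */
static int my_inquiry(struct scsi_device *sdev, unsigned char *buf,
		      unsigned char len, int timeout)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int count, result = -EIO;

	cmd[4] = len;		/* allocation length */

	for (count = 0; count < 3; count++) {
		memset(buf, 0, len);
		result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE,
					  buf, len, &sshdr, timeout, 3);
		if (result == 0)
			break;

		/*
		 * Retry only on the 0x28/0x29 unit attentions (not-ready to
		 * ready, power on/reset); INQUIRY should not raise them, but
		 * many devices do anyway.
		 */
		if ((driver_byte(result) & DRIVER_SENSE) &&
		    scsi_sense_valid(&sshdr) &&
		    sshdr.sense_key == UNIT_ATTENTION &&
		    (sshdr.asc == 0x28 || sshdr.asc == 0x29))
			continue;
		break;
	}
	return result;
}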
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index beed7fbe1cbe..dae59d1da07a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -48,6 +48,30 @@ const char *scsi_device_state_name(enum scsi_device_state state)
48 return name; 48 return name;
49} 49}
50 50
51static struct {
52 enum scsi_host_state value;
53 char *name;
54} shost_states[] = {
55 { SHOST_CREATED, "created" },
56 { SHOST_RUNNING, "running" },
57 { SHOST_CANCEL, "cancel" },
58 { SHOST_DEL, "deleted" },
59 { SHOST_RECOVERY, "recovery" },
60};
61const char *scsi_host_state_name(enum scsi_host_state state)
62{
63 int i;
64 char *name = NULL;
65
66 for (i = 0; i < sizeof(shost_states)/sizeof(shost_states[0]); i++) {
67 if (shost_states[i].value == state) {
68 name = shost_states[i].name;
69 break;
70 }
71 }
72 return name;
73}
74
51static int check_set(unsigned int *val, char *src) 75static int check_set(unsigned int *val, char *src)
52{ 76{
53 char *last; 77 char *last;
@@ -124,6 +148,43 @@ static ssize_t store_scan(struct class_device *class_dev, const char *buf,
124}; 148};
125static CLASS_DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); 149static CLASS_DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
126 150
151static ssize_t
152store_shost_state(struct class_device *class_dev, const char *buf, size_t count)
153{
154 int i;
155 struct Scsi_Host *shost = class_to_shost(class_dev);
156 enum scsi_host_state state = 0;
157
158 for (i = 0; i < sizeof(shost_states)/sizeof(shost_states[0]); i++) {
159 const int len = strlen(shost_states[i].name);
160 if (strncmp(shost_states[i].name, buf, len) == 0 &&
161 buf[len] == '\n') {
162 state = shost_states[i].value;
163 break;
164 }
165 }
166 if (!state)
167 return -EINVAL;
168
169 if (scsi_host_set_state(shost, state))
170 return -EINVAL;
171 return count;
172}
173
174static ssize_t
175show_shost_state(struct class_device *class_dev, char *buf)
176{
177 struct Scsi_Host *shost = class_to_shost(class_dev);
178 const char *name = scsi_host_state_name(shost->shost_state);
179
180 if (!name)
181 return -EINVAL;
182
183 return snprintf(buf, 20, "%s\n", name);
184}
185
186static CLASS_DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
187
127shost_rd_attr(unique_id, "%u\n"); 188shost_rd_attr(unique_id, "%u\n");
128shost_rd_attr(host_busy, "%hu\n"); 189shost_rd_attr(host_busy, "%hu\n");
129shost_rd_attr(cmd_per_lun, "%hd\n"); 190shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -139,6 +200,7 @@ static struct class_device_attribute *scsi_sysfs_shost_attrs[] = {
139 &class_device_attr_unchecked_isa_dma, 200 &class_device_attr_unchecked_isa_dma,
140 &class_device_attr_proc_name, 201 &class_device_attr_proc_name,
141 &class_device_attr_scan, 202 &class_device_attr_scan,
203 &class_device_attr_state,
142 NULL 204 NULL
143}; 205};
144 206
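The new sysfs "state" attribute above is backed by scsi_host_set_state(), which enforces the legal transitions between the SHOST_* states. A tiny sketch of programmatic use (the function and its context are hypothetical; the patch itself only calls it from the sysfs store method):

#include <linux/kernel.h>
#include <scsi/scsi_host.h>

/*
 * Illustrative: return a host to the running state, e.g. at the end of
 * recovery.  scsi_host_set_state() returns non-zero if the transition is
 * not legal from the current state.
 */
static void my_resume_host(struct Scsi_Host *shost)
{
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		printk(KERN_WARNING "host%u: invalid state transition\n",
		       shost->host_no);
}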
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index e6412fce423c..2cab556b6e82 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -252,7 +252,8 @@ struct fc_internal {
252 252
253#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) 253#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
254 254
255static int fc_target_setup(struct device *dev) 255static int fc_target_setup(struct transport_container *tc, struct device *dev,
256 struct class_device *cdev)
256{ 257{
257 struct scsi_target *starget = to_scsi_target(dev); 258 struct scsi_target *starget = to_scsi_target(dev);
258 struct fc_rport *rport = starget_to_rport(starget); 259 struct fc_rport *rport = starget_to_rport(starget);
@@ -281,7 +282,8 @@ static DECLARE_TRANSPORT_CLASS(fc_transport_class,
281 NULL, 282 NULL,
282 NULL); 283 NULL);
283 284
284static int fc_host_setup(struct device *dev) 285static int fc_host_setup(struct transport_container *tc, struct device *dev,
286 struct class_device *cdev)
285{ 287{
286 struct Scsi_Host *shost = dev_to_shost(dev); 288 struct Scsi_Host *shost = dev_to_shost(dev);
287 289
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 7670919a087a..ef577c8c2182 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -28,14 +28,14 @@
28#include "scsi_priv.h" 28#include "scsi_priv.h"
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_request.h> 31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_eh.h> 32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport.h> 33#include <scsi/scsi_transport.h>
34#include <scsi/scsi_transport_spi.h> 34#include <scsi/scsi_transport_spi.h>
35 35
36#define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a) 36#define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a)
37 37
38#define SPI_NUM_ATTRS 13 /* increase this if you add attributes */ 38#define SPI_NUM_ATTRS 14 /* increase this if you add attributes */
39#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always 39#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always
40 * on" attributes */ 40 * on" attributes */
41#define SPI_HOST_ATTRS 1 41#define SPI_HOST_ATTRS 1
@@ -106,27 +106,31 @@ static int sprint_frac(char *dest, int value, int denom)
106 return result; 106 return result;
107} 107}
108 108
109/* Modification of scsi_wait_req that will clear UNIT ATTENTION conditions 109static int spi_execute(struct scsi_device *sdev, const void *cmd,
110 * resulting from (likely) bus and device resets */ 110 enum dma_data_direction dir,
111static void spi_wait_req(struct scsi_request *sreq, const void *cmd, 111 void *buffer, unsigned bufflen,
112 void *buffer, unsigned bufflen) 112 struct scsi_sense_hdr *sshdr)
113{ 113{
114 int i; 114 int i, result;
115 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
115 116
116 for(i = 0; i < DV_RETRIES; i++) { 117 for(i = 0; i < DV_RETRIES; i++) {
117 sreq->sr_request->flags |= REQ_FAILFAST; 118 result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
118 119 sense, DV_TIMEOUT, /* retries */ 1,
119 scsi_wait_req(sreq, cmd, buffer, bufflen, 120 REQ_FAILFAST);
120 DV_TIMEOUT, /* retries */ 1); 121 if (result & DRIVER_SENSE) {
121 if (sreq->sr_result & DRIVER_SENSE) { 122 struct scsi_sense_hdr sshdr_tmp;
122 struct scsi_sense_hdr sshdr; 123 if (!sshdr)
123 124 sshdr = &sshdr_tmp;
124 if (scsi_request_normalize_sense(sreq, &sshdr) 125
125 && sshdr.sense_key == UNIT_ATTENTION) 126 if (scsi_normalize_sense(sense, sizeof(*sense),
127 sshdr)
128 && sshdr->sense_key == UNIT_ATTENTION)
126 continue; 129 continue;
127 } 130 }
128 break; 131 break;
129 } 132 }
133 return result;
130} 134}
131 135
132static struct { 136static struct {
@@ -162,7 +166,8 @@ static inline enum spi_signal_type spi_signal_to_value(const char *name)
162 return SPI_SIGNAL_UNKNOWN; 166 return SPI_SIGNAL_UNKNOWN;
163} 167}
164 168
165static int spi_host_setup(struct device *dev) 169static int spi_host_setup(struct transport_container *tc, struct device *dev,
170 struct class_device *cdev)
166{ 171{
167 struct Scsi_Host *shost = dev_to_shost(dev); 172 struct Scsi_Host *shost = dev_to_shost(dev);
168 173
@@ -196,7 +201,9 @@ static int spi_host_match(struct attribute_container *cont,
196 return &i->t.host_attrs.ac == cont; 201 return &i->t.host_attrs.ac == cont;
197} 202}
198 203
199static int spi_device_configure(struct device *dev) 204static int spi_device_configure(struct transport_container *tc,
205 struct device *dev,
206 struct class_device *cdev)
200{ 207{
201 struct scsi_device *sdev = to_scsi_device(dev); 208 struct scsi_device *sdev = to_scsi_device(dev);
202 struct scsi_target *starget = sdev->sdev_target; 209 struct scsi_target *starget = sdev->sdev_target;
@@ -214,7 +221,9 @@ static int spi_device_configure(struct device *dev)
214 return 0; 221 return 0;
215} 222}
216 223
217static int spi_setup_transport_attrs(struct device *dev) 224static int spi_setup_transport_attrs(struct transport_container *tc,
225 struct device *dev,
226 struct class_device *cdev)
218{ 227{
219 struct scsi_target *starget = to_scsi_target(dev); 228 struct scsi_target *starget = to_scsi_target(dev);
220 229
@@ -231,6 +240,7 @@ static int spi_setup_transport_attrs(struct device *dev)
231 spi_rd_strm(starget) = 0; 240 spi_rd_strm(starget) = 0;
232 spi_rti(starget) = 0; 241 spi_rti(starget) = 0;
233 spi_pcomp_en(starget) = 0; 242 spi_pcomp_en(starget) = 0;
243 spi_hold_mcs(starget) = 0;
234 spi_dv_pending(starget) = 0; 244 spi_dv_pending(starget) = 0;
235 spi_initial_dv(starget) = 0; 245 spi_initial_dv(starget) = 0;
236 init_MUTEX(&spi_dv_sem(starget)); 246 init_MUTEX(&spi_dv_sem(starget));
@@ -347,6 +357,7 @@ spi_transport_rd_attr(wr_flow, "%d\n");
347spi_transport_rd_attr(rd_strm, "%d\n"); 357spi_transport_rd_attr(rd_strm, "%d\n");
348spi_transport_rd_attr(rti, "%d\n"); 358spi_transport_rd_attr(rti, "%d\n");
349spi_transport_rd_attr(pcomp_en, "%d\n"); 359spi_transport_rd_attr(pcomp_en, "%d\n");
360spi_transport_rd_attr(hold_mcs, "%d\n");
350 361
351/* we only care about the first child device so we return 1 */ 362/* we only care about the first child device so we return 1 */
352static int child_iter(struct device *dev, void *data) 363static int child_iter(struct device *dev, void *data)
@@ -539,13 +550,13 @@ enum spi_compare_returns {
539/* This is for read/write Domain Validation: If the device supports 550/* This is for read/write Domain Validation: If the device supports
540 * an echo buffer, we do read/write tests to it */ 551 * an echo buffer, we do read/write tests to it */
541static enum spi_compare_returns 552static enum spi_compare_returns
542spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer, 553spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
543 u8 *ptr, const int retries) 554 u8 *ptr, const int retries)
544{ 555{
545 struct scsi_device *sdev = sreq->sr_device;
546 int len = ptr - buffer; 556 int len = ptr - buffer;
547 int j, k, r; 557 int j, k, r, result;
548 unsigned int pattern = 0x0000ffff; 558 unsigned int pattern = 0x0000ffff;
559 struct scsi_sense_hdr sshdr;
549 560
550 const char spi_write_buffer[] = { 561 const char spi_write_buffer[] = {
551 WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 562 WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
@@ -590,14 +601,12 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
590 } 601 }
591 602
592 for (r = 0; r < retries; r++) { 603 for (r = 0; r < retries; r++) {
593 sreq->sr_cmd_len = 0; /* wait_req to fill in */ 604 result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
594 sreq->sr_data_direction = DMA_TO_DEVICE; 605 buffer, len, &sshdr);
595 spi_wait_req(sreq, spi_write_buffer, buffer, len); 606 if(result || !scsi_device_online(sdev)) {
596 if(sreq->sr_result || !scsi_device_online(sdev)) {
597 struct scsi_sense_hdr sshdr;
598 607
599 scsi_device_set_state(sdev, SDEV_QUIESCE); 608 scsi_device_set_state(sdev, SDEV_QUIESCE);
600 if (scsi_request_normalize_sense(sreq, &sshdr) 609 if (scsi_sense_valid(&sshdr)
601 && sshdr.sense_key == ILLEGAL_REQUEST 610 && sshdr.sense_key == ILLEGAL_REQUEST
602 /* INVALID FIELD IN CDB */ 611 /* INVALID FIELD IN CDB */
603 && sshdr.asc == 0x24 && sshdr.ascq == 0x00) 612 && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
@@ -609,14 +618,13 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
609 return SPI_COMPARE_SKIP_TEST; 618 return SPI_COMPARE_SKIP_TEST;
610 619
611 620
612 SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result); 621 SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", result);
613 return SPI_COMPARE_FAILURE; 622 return SPI_COMPARE_FAILURE;
614 } 623 }
615 624
616 memset(ptr, 0, len); 625 memset(ptr, 0, len);
617 sreq->sr_cmd_len = 0; /* wait_req to fill in */ 626 spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
618 sreq->sr_data_direction = DMA_FROM_DEVICE; 627 ptr, len, NULL);
619 spi_wait_req(sreq, spi_read_buffer, ptr, len);
620 scsi_device_set_state(sdev, SDEV_QUIESCE); 628 scsi_device_set_state(sdev, SDEV_QUIESCE);
621 629
622 if (memcmp(buffer, ptr, len) != 0) 630 if (memcmp(buffer, ptr, len) != 0)
@@ -628,25 +636,22 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
628/* This is for the simplest form of Domain Validation: a read test 636/* This is for the simplest form of Domain Validation: a read test
629 * on the inquiry data from the device */ 637 * on the inquiry data from the device */
630static enum spi_compare_returns 638static enum spi_compare_returns
631spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer, 639spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
632 u8 *ptr, const int retries) 640 u8 *ptr, const int retries)
633{ 641{
634 int r; 642 int r, result;
635 const int len = sreq->sr_device->inquiry_len; 643 const int len = sdev->inquiry_len;
636 struct scsi_device *sdev = sreq->sr_device;
637 const char spi_inquiry[] = { 644 const char spi_inquiry[] = {
638 INQUIRY, 0, 0, 0, len, 0 645 INQUIRY, 0, 0, 0, len, 0
639 }; 646 };
640 647
641 for (r = 0; r < retries; r++) { 648 for (r = 0; r < retries; r++) {
642 sreq->sr_cmd_len = 0; /* wait_req to fill in */
643 sreq->sr_data_direction = DMA_FROM_DEVICE;
644
645 memset(ptr, 0, len); 649 memset(ptr, 0, len);
646 650
647 spi_wait_req(sreq, spi_inquiry, ptr, len); 651 result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
652 ptr, len, NULL);
648 653
649 if(sreq->sr_result || !scsi_device_online(sdev)) { 654 if(result || !scsi_device_online(sdev)) {
650 scsi_device_set_state(sdev, SDEV_QUIESCE); 655 scsi_device_set_state(sdev, SDEV_QUIESCE);
651 return SPI_COMPARE_FAILURE; 656 return SPI_COMPARE_FAILURE;
652 } 657 }
@@ -667,12 +672,11 @@ spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer,
667} 672}
668 673
669static enum spi_compare_returns 674static enum spi_compare_returns
670spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr, 675spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
671 enum spi_compare_returns 676 enum spi_compare_returns
672 (*compare_fn)(struct scsi_request *, u8 *, u8 *, int)) 677 (*compare_fn)(struct scsi_device *, u8 *, u8 *, int))
673{ 678{
674 struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); 679 struct spi_internal *i = to_spi_internal(sdev->host->transportt);
675 struct scsi_device *sdev = sreq->sr_device;
676 struct scsi_target *starget = sdev->sdev_target; 680 struct scsi_target *starget = sdev->sdev_target;
677 int period = 0, prevperiod = 0; 681 int period = 0, prevperiod = 0;
678 enum spi_compare_returns retval; 682 enum spi_compare_returns retval;
@@ -680,7 +684,7 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
680 684
681 for (;;) { 685 for (;;) {
682 int newperiod; 686 int newperiod;
683 retval = compare_fn(sreq, buffer, ptr, DV_LOOPS); 687 retval = compare_fn(sdev, buffer, ptr, DV_LOOPS);
684 688
685 if (retval == SPI_COMPARE_SUCCESS 689 if (retval == SPI_COMPARE_SUCCESS
686 || retval == SPI_COMPARE_SKIP_TEST) 690 || retval == SPI_COMPARE_SKIP_TEST)
@@ -726,9 +730,9 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
726} 730}
727 731
728static int 732static int
729spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer) 733spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
730{ 734{
731 int l; 735 int l, result;
732 736
733 /* first off do a test unit ready. This can error out 737 /* first off do a test unit ready. This can error out
734 * because of reservations or some other reason. If it 738 * because of reservations or some other reason. If it
@@ -744,18 +748,16 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
744 }; 748 };
745 749
746 750
747 sreq->sr_cmd_len = 0;
748 sreq->sr_data_direction = DMA_NONE;
749
750 /* We send a set of three TURs to clear any outstanding 751 /* We send a set of three TURs to clear any outstanding
751 * unit attention conditions if they exist (Otherwise the 752 * unit attention conditions if they exist (Otherwise the
752 * buffer tests won't be happy). If the TUR still fails 753 * buffer tests won't be happy). If the TUR still fails
753 * (reservation conflict, device not ready, etc) just 754 * (reservation conflict, device not ready, etc) just
754 * skip the write tests */ 755 * skip the write tests */
755 for (l = 0; ; l++) { 756 for (l = 0; ; l++) {
756 spi_wait_req(sreq, spi_test_unit_ready, NULL, 0); 757 result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
758 NULL, 0, NULL);
757 759
758 if(sreq->sr_result) { 760 if(result) {
759 if(l >= 3) 761 if(l >= 3)
760 return 0; 762 return 0;
761 } else { 763 } else {
@@ -764,12 +766,10 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
764 } 766 }
765 } 767 }
766 768
767 sreq->sr_cmd_len = 0; 769 result = spi_execute(sdev, spi_read_buffer_descriptor,
768 sreq->sr_data_direction = DMA_FROM_DEVICE; 770 DMA_FROM_DEVICE, buffer, 4, NULL);
769
770 spi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4);
771 771
772 if (sreq->sr_result) 772 if (result)
773 /* Device has no echo buffer */ 773 /* Device has no echo buffer */
774 return 0; 774 return 0;
775 775
@@ -777,17 +777,16 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
777} 777}
778 778
779static void 779static void
780spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer) 780spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
781{ 781{
782 struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); 782 struct spi_internal *i = to_spi_internal(sdev->host->transportt);
783 struct scsi_device *sdev = sreq->sr_device;
784 struct scsi_target *starget = sdev->sdev_target; 783 struct scsi_target *starget = sdev->sdev_target;
785 int len = sdev->inquiry_len; 784 int len = sdev->inquiry_len;
786 /* first set us up for narrow async */ 785 /* first set us up for narrow async */
787 DV_SET(offset, 0); 786 DV_SET(offset, 0);
788 DV_SET(width, 0); 787 DV_SET(width, 0);
789 788
790 if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS) 789 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
791 != SPI_COMPARE_SUCCESS) { 790 != SPI_COMPARE_SUCCESS) {
792 SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n"); 791 SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
793 /* FIXME: should probably offline the device here? */ 792 /* FIXME: should probably offline the device here? */
@@ -799,7 +798,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
799 scsi_device_wide(sdev)) { 798 scsi_device_wide(sdev)) {
800 i->f->set_width(starget, 1); 799 i->f->set_width(starget, 1);
801 800
802 if (spi_dv_device_compare_inquiry(sreq, buffer, 801 if (spi_dv_device_compare_inquiry(sdev, buffer,
803 buffer + len, 802 buffer + len,
804 DV_LOOPS) 803 DV_LOOPS)
805 != SPI_COMPARE_SUCCESS) { 804 != SPI_COMPARE_SUCCESS) {
@@ -820,7 +819,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
820 819
821 len = 0; 820 len = 0;
822 if (scsi_device_dt(sdev)) 821 if (scsi_device_dt(sdev))
823 len = spi_dv_device_get_echo_buffer(sreq, buffer); 822 len = spi_dv_device_get_echo_buffer(sdev, buffer);
824 823
825 retry: 824 retry:
826 825
@@ -846,7 +845,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
846 845
847 if (len == 0) { 846 if (len == 0) {
848 SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n"); 847 SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n");
849 spi_dv_retrain(sreq, buffer, buffer + len, 848 spi_dv_retrain(sdev, buffer, buffer + len,
850 spi_dv_device_compare_inquiry); 849 spi_dv_device_compare_inquiry);
851 return; 850 return;
852 } 851 }
@@ -856,7 +855,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
856 len = SPI_MAX_ECHO_BUFFER_SIZE; 855 len = SPI_MAX_ECHO_BUFFER_SIZE;
857 } 856 }
858 857
859 if (spi_dv_retrain(sreq, buffer, buffer + len, 858 if (spi_dv_retrain(sdev, buffer, buffer + len,
860 spi_dv_device_echo_buffer) 859 spi_dv_device_echo_buffer)
861 == SPI_COMPARE_SKIP_TEST) { 860 == SPI_COMPARE_SKIP_TEST) {
862 /* OK, the stupid drive can't do a write echo buffer 861 /* OK, the stupid drive can't do a write echo buffer
@@ -879,16 +878,12 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
879void 878void
880spi_dv_device(struct scsi_device *sdev) 879spi_dv_device(struct scsi_device *sdev)
881{ 880{
882 struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
883 struct scsi_target *starget = sdev->sdev_target; 881 struct scsi_target *starget = sdev->sdev_target;
884 u8 *buffer; 882 u8 *buffer;
885 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; 883 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
886 884
887 if (unlikely(!sreq))
888 return;
889
890 if (unlikely(scsi_device_get(sdev))) 885 if (unlikely(scsi_device_get(sdev)))
891 goto out_free_req; 886 return;
892 887
893 buffer = kmalloc(len, GFP_KERNEL); 888 buffer = kmalloc(len, GFP_KERNEL);
894 889
@@ -909,7 +904,7 @@ spi_dv_device(struct scsi_device *sdev)
909 904
910 SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n"); 905 SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n");
911 906
912 spi_dv_device_internal(sreq, buffer); 907 spi_dv_device_internal(sdev, buffer);
913 908
914 SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n"); 909 SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n");
915 910
@@ -924,8 +919,6 @@ spi_dv_device(struct scsi_device *sdev)
924 kfree(buffer); 919 kfree(buffer);
925 out_put: 920 out_put:
926 scsi_device_put(sdev); 921 scsi_device_put(sdev);
927 out_free_req:
928 scsi_release_request(sreq);
929} 922}
930EXPORT_SYMBOL(spi_dv_device); 923EXPORT_SYMBOL(spi_dv_device);
931 924
@@ -1028,10 +1021,17 @@ void spi_display_xfer_agreement(struct scsi_target *starget)
1028 sprint_frac(tmp, picosec, 1000); 1021 sprint_frac(tmp, picosec, 1000);
1029 1022
1030 dev_info(&starget->dev, 1023 dev_info(&starget->dev,
1031 "%s %sSCSI %d.%d MB/s %s%s%s (%s ns, offset %d)\n", 1024 "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n",
1032 scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, 1025 scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10,
1033 tp->dt ? "DT" : "ST", tp->iu ? " IU" : "", 1026 tp->dt ? "DT" : "ST",
1034 tp->qas ? " QAS" : "", tmp, tp->offset); 1027 tp->iu ? " IU" : "",
1028 tp->qas ? " QAS" : "",
1029 tp->rd_strm ? " RDSTRM" : "",
1030 tp->rti ? " RTI" : "",
1031 tp->wr_flow ? " WRFLOW" : "",
1032 tp->pcomp_en ? " PCOMP" : "",
1033 tp->hold_mcs ? " HMCS" : "",
1034 tmp, tp->offset);
1035 } else { 1035 } else {
1036 dev_info(&starget->dev, "%sasynchronous.\n", 1036 dev_info(&starget->dev, "%sasynchronous.\n",
1037 tp->width ? "wide " : ""); 1037 tp->width ? "wide " : "");
@@ -1073,6 +1073,7 @@ static int spi_device_match(struct attribute_container *cont,
1073{ 1073{
1074 struct scsi_device *sdev; 1074 struct scsi_device *sdev;
1075 struct Scsi_Host *shost; 1075 struct Scsi_Host *shost;
1076 struct spi_internal *i;
1076 1077
1077 if (!scsi_is_sdev_device(dev)) 1078 if (!scsi_is_sdev_device(dev))
1078 return 0; 1079 return 0;
@@ -1085,6 +1086,9 @@ static int spi_device_match(struct attribute_container *cont,
1085 /* Note: this class has no device attributes, so it has 1086 /* Note: this class has no device attributes, so it has
1086 * no per-HBA allocation and thus we don't need to distinguish 1087 * no per-HBA allocation and thus we don't need to distinguish
1087 * the attribute containers for the device */ 1088 * the attribute containers for the device */
1089 i = to_spi_internal(shost->transportt);
1090 if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target))
1091 return 0;
1088 return 1; 1092 return 1;
1089} 1093}
1090 1094
@@ -1092,6 +1096,7 @@ static int spi_target_match(struct attribute_container *cont,
1092 struct device *dev) 1096 struct device *dev)
1093{ 1097{
1094 struct Scsi_Host *shost; 1098 struct Scsi_Host *shost;
1099 struct scsi_target *starget;
1095 struct spi_internal *i; 1100 struct spi_internal *i;
1096 1101
1097 if (!scsi_is_target_device(dev)) 1102 if (!scsi_is_target_device(dev))
@@ -1103,7 +1108,11 @@ static int spi_target_match(struct attribute_container *cont,
1103 return 0; 1108 return 0;
1104 1109
1105 i = to_spi_internal(shost->transportt); 1110 i = to_spi_internal(shost->transportt);
1106 1111 starget = to_scsi_target(dev);
1112
1113 if (i->f->deny_binding && i->f->deny_binding(starget))
1114 return 0;
1115
1107 return &i->t.target_attrs.ac == cont; 1116 return &i->t.target_attrs.ac == cont;
1108} 1117}
1109 1118
@@ -1154,6 +1163,7 @@ spi_attach_transport(struct spi_function_template *ft)
1154 SETUP_ATTRIBUTE(rd_strm); 1163 SETUP_ATTRIBUTE(rd_strm);
1155 SETUP_ATTRIBUTE(rti); 1164 SETUP_ATTRIBUTE(rti);
1156 SETUP_ATTRIBUTE(pcomp_en); 1165 SETUP_ATTRIBUTE(pcomp_en);
1166 SETUP_ATTRIBUTE(hold_mcs);
1157 1167
1158 /* if you add an attribute but forget to increase SPI_NUM_ATTRS 1168 /* if you add an attribute but forget to increase SPI_NUM_ATTRS
1159 * this bug will trigger */ 1169 * this bug will trigger */
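The spi_execute() wrapper above also illustrates the split between the two new helpers: scsi_execute_req() hands back a decoded struct scsi_sense_hdr, while the lower-level scsi_execute() takes a raw sense buffer plus request flags (REQ_FAILFAST here) and leaves decoding to the caller via scsi_normalize_sense(). A condensed sketch of that lower-level pattern; the function name and retry count are illustrative.

#include <linux/blkdev.h>		/* REQ_FAILFAST */
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>		/* SCSI_SENSE_BUFFERSIZE */
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

/*
 * Illustrative: issue a CDB with scsi_execute(), retrying while the target
 * reports a (likely reset-induced) UNIT ATTENTION.
 */
static int my_execute(struct scsi_device *sdev, const unsigned char *cmd,
		      enum dma_data_direction dir, void *buf, unsigned len,
		      int timeout)
{
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	struct scsi_sense_hdr sshdr;
	int i, result = 0;

	for (i = 0; i < 5; i++) {
		result = scsi_execute(sdev, cmd, dir, buf, len, sense,
				      timeout, 1, REQ_FAILFAST);
		/* Decode the raw sense buffer ourselves and loop only on
		 * UNIT ATTENTION; anything else is returned as is. */
		if ((driver_byte(result) & DRIVER_SENSE) &&
		    scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) &&
		    sshdr.sense_key == UNIT_ATTENTION)
			continue;
		break;
	}
	return result;
}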
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0410e1bf109a..de564b386052 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -59,7 +59,6 @@
59#include <scsi/scsi_eh.h> 59#include <scsi/scsi_eh.h>
60#include <scsi/scsi_host.h> 60#include <scsi/scsi_host.h>
61#include <scsi/scsi_ioctl.h> 61#include <scsi/scsi_ioctl.h>
62#include <scsi/scsi_request.h>
63#include <scsi/scsicam.h> 62#include <scsi/scsicam.h>
64 63
65#include "scsi_logging.h" 64#include "scsi_logging.h"
@@ -125,7 +124,7 @@ static int sd_issue_flush(struct device *, sector_t *);
125static void sd_end_flush(request_queue_t *, struct request *); 124static void sd_end_flush(request_queue_t *, struct request *);
126static int sd_prepare_flush(request_queue_t *, struct request *); 125static int sd_prepare_flush(request_queue_t *, struct request *);
127static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 126static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
128 struct scsi_request *SRpnt, unsigned char *buffer); 127 unsigned char *buffer);
129 128
130static struct scsi_driver sd_template = { 129static struct scsi_driver sd_template = {
131 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
@@ -682,19 +681,13 @@ not_present:
682 681
683static int sd_sync_cache(struct scsi_device *sdp) 682static int sd_sync_cache(struct scsi_device *sdp)
684{ 683{
685 struct scsi_request *sreq;
686 int retries, res; 684 int retries, res;
685 struct scsi_sense_hdr sshdr;
687 686
688 if (!scsi_device_online(sdp)) 687 if (!scsi_device_online(sdp))
689 return -ENODEV; 688 return -ENODEV;
690 689
691 sreq = scsi_allocate_request(sdp, GFP_KERNEL);
692 if (!sreq) {
693 printk("FAILED\n No memory for request\n");
694 return -ENOMEM;
695 }
696 690
697 sreq->sr_data_direction = DMA_NONE;
698 for (retries = 3; retries > 0; --retries) { 691 for (retries = 3; retries > 0; --retries) {
699 unsigned char cmd[10] = { 0 }; 692 unsigned char cmd[10] = { 0 };
700 693
@@ -703,22 +696,20 @@ static int sd_sync_cache(struct scsi_device *sdp)
703 * Leave the rest of the command zero to indicate 696 * Leave the rest of the command zero to indicate
704 * flush everything. 697 * flush everything.
705 */ 698 */
706 scsi_wait_req(sreq, cmd, NULL, 0, SD_TIMEOUT, SD_MAX_RETRIES); 699 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
707 if (sreq->sr_result == 0) 700 SD_TIMEOUT, SD_MAX_RETRIES);
701 if (res == 0)
708 break; 702 break;
709 } 703 }
710 704
711 res = sreq->sr_result; 705 if (res) { printk(KERN_WARNING "FAILED\n status = %x, message = %02x, "
712 if (res) {
713 printk(KERN_WARNING "FAILED\n status = %x, message = %02x, "
714 "host = %d, driver = %02x\n ", 706 "host = %d, driver = %02x\n ",
715 status_byte(res), msg_byte(res), 707 status_byte(res), msg_byte(res),
716 host_byte(res), driver_byte(res)); 708 host_byte(res), driver_byte(res));
717 if (driver_byte(res) & DRIVER_SENSE) 709 if (driver_byte(res) & DRIVER_SENSE)
718 scsi_print_req_sense("sd", sreq); 710 scsi_print_sense_hdr("sd", &sshdr);
719 } 711 }
720 712
721 scsi_release_request(sreq);
722 return res; 713 return res;
723} 714}
724 715
@@ -957,22 +948,19 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
957 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9); 948 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9);
958} 949}
959 950
960static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp) 951static int media_not_present(struct scsi_disk *sdkp,
952 struct scsi_sense_hdr *sshdr)
961{ 953{
962 struct scsi_sense_hdr sshdr;
963 954
964 if (!srp->sr_result) 955 if (!scsi_sense_valid(sshdr))
965 return 0;
966 if (!(driver_byte(srp->sr_result) & DRIVER_SENSE))
967 return 0; 956 return 0;
968 /* not invoked for commands that could return deferred errors */ 957 /* not invoked for commands that could return deferred errors */
969 if (scsi_request_normalize_sense(srp, &sshdr)) { 958 if (sshdr->sense_key != NOT_READY &&
970 if (sshdr.sense_key != NOT_READY && 959 sshdr->sense_key != UNIT_ATTENTION)
971 sshdr.sense_key != UNIT_ATTENTION) 960 return 0;
972 return 0; 961 if (sshdr->asc != 0x3A) /* medium not present */
973 if (sshdr.asc != 0x3A) /* medium not present */ 962 return 0;
974 return 0; 963
975 }
976 set_media_not_present(sdkp); 964 set_media_not_present(sdkp);
977 return 1; 965 return 1;
978} 966}
@@ -981,10 +969,10 @@ static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp)
981 * spinup disk - called only in sd_revalidate_disk() 969 * spinup disk - called only in sd_revalidate_disk()
982 */ 970 */
983static void 971static void
984sd_spinup_disk(struct scsi_disk *sdkp, char *diskname, 972sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
985 struct scsi_request *SRpnt, unsigned char *buffer) { 973{
986 unsigned char cmd[10]; 974 unsigned char cmd[10];
987 unsigned long spintime_value = 0; 975 unsigned long spintime_expire = 0;
988 int retries, spintime; 976 int retries, spintime;
989 unsigned int the_result; 977 unsigned int the_result;
990 struct scsi_sense_hdr sshdr; 978 struct scsi_sense_hdr sshdr;
@@ -1001,18 +989,13 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1001 cmd[0] = TEST_UNIT_READY; 989 cmd[0] = TEST_UNIT_READY;
1002 memset((void *) &cmd[1], 0, 9); 990 memset((void *) &cmd[1], 0, 9);
1003 991
1004 SRpnt->sr_cmd_len = 0; 992 the_result = scsi_execute_req(sdkp->device, cmd,
1005 memset(SRpnt->sr_sense_buffer, 0, 993 DMA_NONE, NULL, 0,
1006 SCSI_SENSE_BUFFERSIZE); 994 &sshdr, SD_TIMEOUT,
1007 SRpnt->sr_data_direction = DMA_NONE; 995 SD_MAX_RETRIES);
1008 996
1009 scsi_wait_req (SRpnt, (void *) cmd, (void *) buffer,
1010 0/*512*/, SD_TIMEOUT, SD_MAX_RETRIES);
1011
1012 the_result = SRpnt->sr_result;
1013 if (the_result) 997 if (the_result)
1014 sense_valid = scsi_request_normalize_sense( 998 sense_valid = scsi_sense_valid(&sshdr);
1015 SRpnt, &sshdr);
1016 retries++; 999 retries++;
1017 } while (retries < 3 && 1000 } while (retries < 3 &&
1018 (!scsi_status_is_good(the_result) || 1001 (!scsi_status_is_good(the_result) ||
@@ -1024,7 +1007,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1024 * any media in it, don't bother with any of the rest of 1007 * any media in it, don't bother with any of the rest of
1025 * this crap. 1008 * this crap.
1026 */ 1009 */
1027 if (media_not_present(sdkp, SRpnt)) 1010 if (media_not_present(sdkp, &sshdr))
1028 return; 1011 return;
1029 1012
1030 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { 1013 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
@@ -1063,33 +1046,42 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1063 cmd[1] = 1; /* Return immediately */ 1046 cmd[1] = 1; /* Return immediately */
1064 memset((void *) &cmd[2], 0, 8); 1047 memset((void *) &cmd[2], 0, 8);
1065 cmd[4] = 1; /* Start spin cycle */ 1048 cmd[4] = 1; /* Start spin cycle */
1066 SRpnt->sr_cmd_len = 0; 1049 scsi_execute_req(sdkp->device, cmd, DMA_NONE,
1067 memset(SRpnt->sr_sense_buffer, 0, 1050 NULL, 0, &sshdr,
1068 SCSI_SENSE_BUFFERSIZE); 1051 SD_TIMEOUT, SD_MAX_RETRIES);
1069 1052 spintime_expire = jiffies + 100 * HZ;
1070 SRpnt->sr_data_direction = DMA_NONE; 1053 spintime = 1;
1071 scsi_wait_req(SRpnt, (void *)cmd,
1072 (void *) buffer, 0/*512*/,
1073 SD_TIMEOUT, SD_MAX_RETRIES);
1074 spintime_value = jiffies;
1075 } 1054 }
1076 spintime = 1;
1077 /* Wait 1 second for next try */ 1055 /* Wait 1 second for next try */
1078 msleep(1000); 1056 msleep(1000);
1079 printk("."); 1057 printk(".");
1058
1059 /*
1060 * Wait for USB flash devices with slow firmware.
1061 * Yes, this sense key/ASC combination shouldn't
1062 * occur here. It's characteristic of these devices.
1063 */
1064 } else if (sense_valid &&
1065 sshdr.sense_key == UNIT_ATTENTION &&
1066 sshdr.asc == 0x28) {
1067 if (!spintime) {
1068 spintime_expire = jiffies + 5 * HZ;
1069 spintime = 1;
1070 }
1071 /* Wait 1 second for next try */
1072 msleep(1000);
1080 } else { 1073 } else {
1081 /* we don't understand the sense code, so it's 1074 /* we don't understand the sense code, so it's
1082 * probably pointless to loop */ 1075 * probably pointless to loop */
1083 if(!spintime) { 1076 if(!spintime) {
1084 printk(KERN_NOTICE "%s: Unit Not Ready, " 1077 printk(KERN_NOTICE "%s: Unit Not Ready, "
1085 "sense:\n", diskname); 1078 "sense:\n", diskname);
1086 scsi_print_req_sense("", SRpnt); 1079 scsi_print_sense_hdr("", &sshdr);
1087 } 1080 }
1088 break; 1081 break;
1089 } 1082 }
1090 1083
1091 } while (spintime && 1084 } while (spintime && time_before_eq(jiffies, spintime_expire));
1092 time_after(spintime_value + 100 * HZ, jiffies));
1093 1085
1094 if (spintime) { 1086 if (spintime) {
1095 if (scsi_status_is_good(the_result)) 1087 if (scsi_status_is_good(the_result))
@@ -1104,14 +1096,15 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1104 */ 1096 */
1105static void 1097static void
1106sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 1098sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
1107 struct scsi_request *SRpnt, unsigned char *buffer) { 1099 unsigned char *buffer)
1100{
1108 unsigned char cmd[16]; 1101 unsigned char cmd[16];
1109 struct scsi_device *sdp = sdkp->device;
1110 int the_result, retries; 1102 int the_result, retries;
1111 int sector_size = 0; 1103 int sector_size = 0;
1112 int longrc = 0; 1104 int longrc = 0;
1113 struct scsi_sense_hdr sshdr; 1105 struct scsi_sense_hdr sshdr;
1114 int sense_valid = 0; 1106 int sense_valid = 0;
1107 struct scsi_device *sdp = sdkp->device;
1115 1108
1116repeat: 1109repeat:
1117 retries = 3; 1110 retries = 3;
@@ -1128,20 +1121,15 @@ repeat:
1128 memset((void *) buffer, 0, 8); 1121 memset((void *) buffer, 0, 8);
1129 } 1122 }
1130 1123
1131 SRpnt->sr_cmd_len = 0; 1124 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1132 memset(SRpnt->sr_sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1125 buffer, longrc ? 12 : 8, &sshdr,
1133 SRpnt->sr_data_direction = DMA_FROM_DEVICE; 1126 SD_TIMEOUT, SD_MAX_RETRIES);
1134
1135 scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
1136 longrc ? 12 : 8, SD_TIMEOUT, SD_MAX_RETRIES);
1137 1127
1138 if (media_not_present(sdkp, SRpnt)) 1128 if (media_not_present(sdkp, &sshdr))
1139 return; 1129 return;
1140 1130
1141 the_result = SRpnt->sr_result;
1142 if (the_result) 1131 if (the_result)
1143 sense_valid = scsi_request_normalize_sense(SRpnt, 1132 sense_valid = scsi_sense_valid(&sshdr);
1144 &sshdr);
1145 retries--; 1133 retries--;
1146 1134
1147 } while (the_result && retries); 1135 } while (the_result && retries);
@@ -1156,7 +1144,7 @@ repeat:
1156 driver_byte(the_result)); 1144 driver_byte(the_result));
1157 1145
1158 if (driver_byte(the_result) & DRIVER_SENSE) 1146 if (driver_byte(the_result) & DRIVER_SENSE)
1159 scsi_print_req_sense("sd", SRpnt); 1147 scsi_print_sense_hdr("sd", &sshdr);
1160 else 1148 else
1161 printk("%s : sense not available. \n", diskname); 1149 printk("%s : sense not available. \n", diskname);
1162 1150
@@ -1296,11 +1284,13 @@ got_data:
1296 1284
1297/* called with buffer of length 512 */ 1285/* called with buffer of length 512 */
1298static inline int 1286static inline int
1299sd_do_mode_sense(struct scsi_request *SRpnt, int dbd, int modepage, 1287sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
1300 unsigned char *buffer, int len, struct scsi_mode_data *data) 1288 unsigned char *buffer, int len, struct scsi_mode_data *data,
1289 struct scsi_sense_hdr *sshdr)
1301{ 1290{
1302 return __scsi_mode_sense(SRpnt, dbd, modepage, buffer, len, 1291 return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
1303 SD_TIMEOUT, SD_MAX_RETRIES, data); 1292 SD_TIMEOUT, SD_MAX_RETRIES, data,
1293 sshdr);
1304} 1294}
1305 1295
1306/* 1296/*
@@ -1309,25 +1299,27 @@ sd_do_mode_sense(struct scsi_request *SRpnt, int dbd, int modepage,
1309 */ 1299 */
1310static void 1300static void
1311sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname, 1301sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1312 struct scsi_request *SRpnt, unsigned char *buffer) { 1302 unsigned char *buffer)
1303{
1313 int res; 1304 int res;
1305 struct scsi_device *sdp = sdkp->device;
1314 struct scsi_mode_data data; 1306 struct scsi_mode_data data;
1315 1307
1316 set_disk_ro(sdkp->disk, 0); 1308 set_disk_ro(sdkp->disk, 0);
1317 if (sdkp->device->skip_ms_page_3f) { 1309 if (sdp->skip_ms_page_3f) {
1318 printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname); 1310 printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname);
1319 return; 1311 return;
1320 } 1312 }
1321 1313
1322 if (sdkp->device->use_192_bytes_for_3f) { 1314 if (sdp->use_192_bytes_for_3f) {
1323 res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 192, &data); 1315 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
1324 } else { 1316 } else {
1325 /* 1317 /*
1326 * First attempt: ask for all pages (0x3F), but only 4 bytes. 1318 * First attempt: ask for all pages (0x3F), but only 4 bytes.
1327 * We have to start carefully: some devices hang if we ask 1319 * We have to start carefully: some devices hang if we ask
1328 * for more than is available. 1320 * for more than is available.
1329 */ 1321 */
1330 res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 4, &data); 1322 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
1331 1323
1332 /* 1324 /*
1333 * Second attempt: ask for page 0 When only page 0 is 1325 * Second attempt: ask for page 0 When only page 0 is
@@ -1336,14 +1328,14 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1336 * CDB. 1328 * CDB.
1337 */ 1329 */
1338 if (!scsi_status_is_good(res)) 1330 if (!scsi_status_is_good(res))
1339 res = sd_do_mode_sense(SRpnt, 0, 0, buffer, 4, &data); 1331 res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
1340 1332
1341 /* 1333 /*
1342 * Third attempt: ask 255 bytes, as we did earlier. 1334 * Third attempt: ask 255 bytes, as we did earlier.
1343 */ 1335 */
1344 if (!scsi_status_is_good(res)) 1336 if (!scsi_status_is_good(res))
1345 res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 255, 1337 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
1346 &data); 1338 &data, NULL);
1347 } 1339 }
1348 1340
1349 if (!scsi_status_is_good(res)) { 1341 if (!scsi_status_is_good(res)) {
@@ -1365,19 +1357,20 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1365 */ 1357 */
1366static void 1358static void
1367sd_read_cache_type(struct scsi_disk *sdkp, char *diskname, 1359sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1368 struct scsi_request *SRpnt, unsigned char *buffer) 1360 unsigned char *buffer)
1369{ 1361{
1370 int len = 0, res; 1362 int len = 0, res;
1363 struct scsi_device *sdp = sdkp->device;
1371 1364
1372 int dbd; 1365 int dbd;
1373 int modepage; 1366 int modepage;
1374 struct scsi_mode_data data; 1367 struct scsi_mode_data data;
1375 struct scsi_sense_hdr sshdr; 1368 struct scsi_sense_hdr sshdr;
1376 1369
1377 if (sdkp->device->skip_ms_page_8) 1370 if (sdp->skip_ms_page_8)
1378 goto defaults; 1371 goto defaults;
1379 1372
1380 if (sdkp->device->type == TYPE_RBC) { 1373 if (sdp->type == TYPE_RBC) {
1381 modepage = 6; 1374 modepage = 6;
1382 dbd = 8; 1375 dbd = 8;
1383 } else { 1376 } else {
@@ -1386,7 +1379,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1386 } 1379 }
1387 1380
1388 /* cautiously ask */ 1381 /* cautiously ask */
1389 res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, 4, &data); 1382 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
1390 1383
1391 if (!scsi_status_is_good(res)) 1384 if (!scsi_status_is_good(res))
1392 goto bad_sense; 1385 goto bad_sense;
@@ -1407,7 +1400,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1407 len += data.header_length + data.block_descriptor_length; 1400 len += data.header_length + data.block_descriptor_length;
1408 1401
1409 /* Get the data */ 1402 /* Get the data */
1410 res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, len, &data); 1403 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
1411 1404
1412 if (scsi_status_is_good(res)) { 1405 if (scsi_status_is_good(res)) {
1413 const char *types[] = { 1406 const char *types[] = {
@@ -1439,7 +1432,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1439 } 1432 }
1440 1433
1441bad_sense: 1434bad_sense:
1442 if (scsi_request_normalize_sense(SRpnt, &sshdr) && 1435 if (scsi_sense_valid(&sshdr) &&
1443 sshdr.sense_key == ILLEGAL_REQUEST && 1436 sshdr.sense_key == ILLEGAL_REQUEST &&
1444 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 1437 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
1445 printk(KERN_NOTICE "%s: cache data unavailable\n", 1438 printk(KERN_NOTICE "%s: cache data unavailable\n",
@@ -1464,7 +1457,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
1464{ 1457{
1465 struct scsi_disk *sdkp = scsi_disk(disk); 1458 struct scsi_disk *sdkp = scsi_disk(disk);
1466 struct scsi_device *sdp = sdkp->device; 1459 struct scsi_device *sdp = sdkp->device;
1467 struct scsi_request *sreq;
1468 unsigned char *buffer; 1460 unsigned char *buffer;
1469 1461
1470 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); 1462 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
@@ -1476,18 +1468,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
1476 if (!scsi_device_online(sdp)) 1468 if (!scsi_device_online(sdp))
1477 goto out; 1469 goto out;
1478 1470
1479 sreq = scsi_allocate_request(sdp, GFP_KERNEL);
1480 if (!sreq) {
1481 printk(KERN_WARNING "(sd_revalidate_disk:) Request allocation "
1482 "failure.\n");
1483 goto out;
1484 }
1485
1486 buffer = kmalloc(512, GFP_KERNEL | __GFP_DMA); 1471 buffer = kmalloc(512, GFP_KERNEL | __GFP_DMA);
1487 if (!buffer) { 1472 if (!buffer) {
1488 printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation " 1473 printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation "
1489 "failure.\n"); 1474 "failure.\n");
1490 goto out_release_request; 1475 goto out;
1491 } 1476 }
1492 1477
1493 /* defaults, until the device tells us otherwise */ 1478 /* defaults, until the device tells us otherwise */
@@ -1498,25 +1483,23 @@ static int sd_revalidate_disk(struct gendisk *disk)
1498 sdkp->WCE = 0; 1483 sdkp->WCE = 0;
1499 sdkp->RCD = 0; 1484 sdkp->RCD = 0;
1500 1485
1501 sd_spinup_disk(sdkp, disk->disk_name, sreq, buffer); 1486 sd_spinup_disk(sdkp, disk->disk_name);
1502 1487
1503 /* 1488 /*
1504 * Without media there is no reason to ask; moreover, some devices 1489 * Without media there is no reason to ask; moreover, some devices
1505 * react badly if we do. 1490 * react badly if we do.
1506 */ 1491 */
1507 if (sdkp->media_present) { 1492 if (sdkp->media_present) {
1508 sd_read_capacity(sdkp, disk->disk_name, sreq, buffer); 1493 sd_read_capacity(sdkp, disk->disk_name, buffer);
1509 if (sdp->removable) 1494 if (sdp->removable)
1510 sd_read_write_protect_flag(sdkp, disk->disk_name, 1495 sd_read_write_protect_flag(sdkp, disk->disk_name,
1511 sreq, buffer); 1496 buffer);
1512 sd_read_cache_type(sdkp, disk->disk_name, sreq, buffer); 1497 sd_read_cache_type(sdkp, disk->disk_name, buffer);
1513 } 1498 }
1514 1499
1515 set_capacity(disk, sdkp->capacity); 1500 set_capacity(disk, sdkp->capacity);
1516 kfree(buffer); 1501 kfree(buffer);
1517 1502
1518 out_release_request:
1519 scsi_release_request(sreq);
1520 out: 1503 out:
1521 return 0; 1504 return 0;
1522} 1505}
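
The sd.c hunks above drop the struct scsi_request plumbing and send all MODE SENSE traffic through sd_do_mode_sense(), which now takes the scsi_device plus an optional scsi_sense_hdr. The wrapper's own body falls outside this excerpt; a minimal sketch of what its new argument list suggests it reduces to, modelled on the scsi_mode_sense() call visible in the sr.c hunk further down (SD_TIMEOUT and SD_MAX_RETRIES are assumed to be the driver's existing timeout/retry constants):

	/* Hypothetical sketch only -- the real body is not part of this diff. */
	static int
	sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
			 unsigned char *buffer, int len,
			 struct scsi_mode_data *data,
			 struct scsi_sense_hdr *sshdr)
	{
		/* scsi_mode_sense() issues MODE SENSE against the scsi_device
		 * directly, so no scsi_request has to be allocated here. */
		return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
				       SD_TIMEOUT, SD_MAX_RETRIES, data, sshdr);
	}

This is also why sd_revalidate_disk() above loses its scsi_allocate_request()/scsi_release_request() pair and the out_release_request label.
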
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e822ca0e97cf..b1b69d738d08 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -61,7 +61,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
61 61
62#ifdef CONFIG_SCSI_PROC_FS 62#ifdef CONFIG_SCSI_PROC_FS
63#include <linux/proc_fs.h> 63#include <linux/proc_fs.h>
64static char *sg_version_date = "20050328"; 64static char *sg_version_date = "20050901";
65 65
66static int sg_proc_init(void); 66static int sg_proc_init(void);
67static void sg_proc_cleanup(void); 67static void sg_proc_cleanup(void);
@@ -1027,8 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1027 if (sdp->detached) 1027 if (sdp->detached)
1028 return -ENODEV; 1028 return -ENODEV;
1029 if (filp->f_flags & O_NONBLOCK) { 1029 if (filp->f_flags & O_NONBLOCK) {
1030 if (test_bit(SHOST_RECOVERY, 1030 if (sdp->device->host->shost_state == SHOST_RECOVERY)
1031 &sdp->device->host->shost_state))
1032 return -EBUSY; 1031 return -EBUSY;
1033 } else if (!scsi_block_when_processing_errors(sdp->device)) 1032 } else if (!scsi_block_when_processing_errors(sdp->device))
1034 return -EBUSY; 1033 return -EBUSY;
@@ -1795,12 +1794,12 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1795 unsigned long uaddr, size_t count, int rw, 1794 unsigned long uaddr, size_t count, int rw,
1796 unsigned long max_pfn) 1795 unsigned long max_pfn)
1797{ 1796{
1797 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1798 unsigned long start = uaddr >> PAGE_SHIFT;
1799 const int nr_pages = end - start;
1798 int res, i, j; 1800 int res, i, j;
1799 unsigned int nr_pages;
1800 struct page **pages; 1801 struct page **pages;
1801 1802
1802 nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
1803
1804 /* User attempted Overflow! */ 1803 /* User attempted Overflow! */
1805 if ((uaddr + count) < uaddr) 1804 if ((uaddr + count) < uaddr)
1806 return -EINVAL; 1805 return -EINVAL;
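
The open-coded page count removed here and the end/start form that replaces it (the same change appears in the st.c hunk near the end of this diff) are arithmetically identical, since ~PAGE_MASK equals PAGE_SIZE - 1. A small stand-alone check, with PAGE_SIZE fixed at 4 KiB purely for the demonstration (user-space code, not part of the driver):

	#include <assert.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long uaddr = 0x1000300;	/* offset 0x300 into a page */
		unsigned long count = 0x1000;		/* spills into a second page */

		/* old sg.c/st.c expression */
		unsigned long old_np = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK)
					>> PAGE_SHIFT;
		/* new expression */
		unsigned long end   = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		assert(old_np == end - start);		/* both yield 2 pages */
		return 0;
	}
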
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 2f259f249522..ce63fc8312dc 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -50,10 +50,10 @@
50#include <scsi/scsi_dbg.h> 50#include <scsi/scsi_dbg.h>
51#include <scsi/scsi_device.h> 51#include <scsi/scsi_device.h>
52#include <scsi/scsi_driver.h> 52#include <scsi/scsi_driver.h>
53#include <scsi/scsi_cmnd.h>
53#include <scsi/scsi_eh.h> 54#include <scsi/scsi_eh.h>
54#include <scsi/scsi_host.h> 55#include <scsi/scsi_host.h>
55#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */ 56#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */
56#include <scsi/scsi_request.h>
57 57
58#include "scsi_logging.h" 58#include "scsi_logging.h"
59#include "sr.h" 59#include "sr.h"
@@ -199,15 +199,7 @@ int sr_media_change(struct cdrom_device_info *cdi, int slot)
199 /* check multisession offset etc */ 199 /* check multisession offset etc */
200 sr_cd_check(cdi); 200 sr_cd_check(cdi);
201 201
202 /* 202 get_sectorsize(cd);
203 * If the disk changed, the capacity will now be different,
204 * so we force a re-read of this information
205 * Force 2048 for the sector size so that filesystems won't
206 * be trying to use something that is too small if the disc
207 * has changed.
208 */
209 cd->needs_sector_size = 1;
210 cd->device->sector_size = 2048;
211 } 203 }
212 return retval; 204 return retval;
213} 205}
@@ -538,13 +530,6 @@ static int sr_open(struct cdrom_device_info *cdi, int purpose)
538 if (!scsi_block_when_processing_errors(sdev)) 530 if (!scsi_block_when_processing_errors(sdev))
539 goto error_out; 531 goto error_out;
540 532
541 /*
542 * If this device did not have media in the drive at boot time, then
543 * we would have been unable to get the sector size. Check to see if
544 * this is the case, and try again.
545 */
546 if (cd->needs_sector_size)
547 get_sectorsize(cd);
548 return 0; 533 return 0;
549 534
550error_out: 535error_out:
@@ -604,7 +589,6 @@ static int sr_probe(struct device *dev)
604 cd->driver = &sr_template; 589 cd->driver = &sr_template;
605 cd->disk = disk; 590 cd->disk = disk;
606 cd->capacity = 0x1fffff; 591 cd->capacity = 0x1fffff;
607 cd->needs_sector_size = 1;
608 cd->device->changed = 1; /* force recheck CD type */ 592 cd->device->changed = 1; /* force recheck CD type */
609 cd->use = 1; 593 cd->use = 1;
610 cd->readcd_known = 0; 594 cd->readcd_known = 0;
@@ -658,43 +642,30 @@ static void get_sectorsize(struct scsi_cd *cd)
658 unsigned char *buffer; 642 unsigned char *buffer;
659 int the_result, retries = 3; 643 int the_result, retries = 3;
660 int sector_size; 644 int sector_size;
661 struct scsi_request *SRpnt = NULL;
662 request_queue_t *queue; 645 request_queue_t *queue;
663 646
664 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 647 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
665 if (!buffer) 648 if (!buffer)
666 goto Enomem; 649 goto Enomem;
667 SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
668 if (!SRpnt)
669 goto Enomem;
670 650
671 do { 651 do {
672 cmd[0] = READ_CAPACITY; 652 cmd[0] = READ_CAPACITY;
673 memset((void *) &cmd[1], 0, 9); 653 memset((void *) &cmd[1], 0, 9);
674 /* Mark as really busy */
675 SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
676 SRpnt->sr_cmd_len = 0;
677
678 memset(buffer, 0, 8); 654 memset(buffer, 0, 8);
679 655
680 /* Do the command and wait.. */ 656 /* Do the command and wait.. */
681 SRpnt->sr_data_direction = DMA_FROM_DEVICE; 657 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
682 scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer, 658 buffer, 8, NULL, SR_TIMEOUT,
683 8, SR_TIMEOUT, MAX_RETRIES); 659 MAX_RETRIES);
684 660
685 the_result = SRpnt->sr_result;
686 retries--; 661 retries--;
687 662
688 } while (the_result && retries); 663 } while (the_result && retries);
689 664
690 665
691 scsi_release_request(SRpnt);
692 SRpnt = NULL;
693
694 if (the_result) { 666 if (the_result) {
695 cd->capacity = 0x1fffff; 667 cd->capacity = 0x1fffff;
696 sector_size = 2048; /* A guess, just in case */ 668 sector_size = 2048; /* A guess, just in case */
697 cd->needs_sector_size = 1;
698 } else { 669 } else {
699#if 0 670#if 0
700 if (cdrom_get_last_written(&cd->cdi, 671 if (cdrom_get_last_written(&cd->cdi,
@@ -727,7 +698,6 @@ static void get_sectorsize(struct scsi_cd *cd)
727 printk("%s: unsupported sector size %d.\n", 698 printk("%s: unsupported sector size %d.\n",
728 cd->cdi.name, sector_size); 699 cd->cdi.name, sector_size);
729 cd->capacity = 0; 700 cd->capacity = 0;
730 cd->needs_sector_size = 1;
731 } 701 }
732 702
733 cd->device->sector_size = sector_size; 703 cd->device->sector_size = sector_size;
@@ -736,7 +706,6 @@ static void get_sectorsize(struct scsi_cd *cd)
736 * Add this so that we have the ability to correctly gauge 706 * Add this so that we have the ability to correctly gauge
737 * what the device is capable of. 707 * what the device is capable of.
738 */ 708 */
739 cd->needs_sector_size = 0;
740 set_capacity(cd->disk, cd->capacity); 709 set_capacity(cd->disk, cd->capacity);
741 } 710 }
742 711
@@ -748,10 +717,7 @@ out:
748 717
749Enomem: 718Enomem:
750 cd->capacity = 0x1fffff; 719 cd->capacity = 0x1fffff;
751 sector_size = 2048; /* A guess, just in case */ 720 cd->device->sector_size = 2048; /* A guess, just in case */
752 cd->needs_sector_size = 1;
753 if (SRpnt)
754 scsi_release_request(SRpnt);
755 goto out; 721 goto out;
756} 722}
757 723
@@ -759,8 +725,8 @@ static void get_capabilities(struct scsi_cd *cd)
759{ 725{
760 unsigned char *buffer; 726 unsigned char *buffer;
761 struct scsi_mode_data data; 727 struct scsi_mode_data data;
762 struct scsi_request *SRpnt;
763 unsigned char cmd[MAX_COMMAND_SIZE]; 728 unsigned char cmd[MAX_COMMAND_SIZE];
729 struct scsi_sense_hdr sshdr;
764 unsigned int the_result; 730 unsigned int the_result;
765 int retries, rc, n; 731 int retries, rc, n;
766 732
@@ -776,19 +742,11 @@ static void get_capabilities(struct scsi_cd *cd)
776 "" 742 ""
777 }; 743 };
778 744
779 /* allocate a request for the TEST_UNIT_READY */
780 SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
781 if (!SRpnt) {
782 printk(KERN_WARNING "(get_capabilities:) Request allocation "
783 "failure.\n");
784 return;
785 }
786 745
787 /* allocate transfer buffer */ 746 /* allocate transfer buffer */
788 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 747 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
789 if (!buffer) { 748 if (!buffer) {
790 printk(KERN_ERR "sr: out of memory.\n"); 749 printk(KERN_ERR "sr: out of memory.\n");
791 scsi_release_request(SRpnt);
792 return; 750 return;
793 } 751 }
794 752
@@ -800,24 +758,19 @@ static void get_capabilities(struct scsi_cd *cd)
800 memset((void *)cmd, 0, MAX_COMMAND_SIZE); 758 memset((void *)cmd, 0, MAX_COMMAND_SIZE);
801 cmd[0] = TEST_UNIT_READY; 759 cmd[0] = TEST_UNIT_READY;
802 760
803 SRpnt->sr_cmd_len = 0; 761 the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL,
804 SRpnt->sr_sense_buffer[0] = 0; 762 0, &sshdr, SR_TIMEOUT,
805 SRpnt->sr_sense_buffer[2] = 0; 763 MAX_RETRIES);
806 SRpnt->sr_data_direction = DMA_NONE;
807
808 scsi_wait_req (SRpnt, (void *) cmd, buffer,
809 0, SR_TIMEOUT, MAX_RETRIES);
810 764
811 the_result = SRpnt->sr_result;
812 retries++; 765 retries++;
813 } while (retries < 5 && 766 } while (retries < 5 &&
814 (!scsi_status_is_good(the_result) || 767 (!scsi_status_is_good(the_result) ||
815 ((driver_byte(the_result) & DRIVER_SENSE) && 768 (scsi_sense_valid(&sshdr) &&
816 SRpnt->sr_sense_buffer[2] == UNIT_ATTENTION))); 769 sshdr.sense_key == UNIT_ATTENTION)));
817 770
818 /* ask for mode page 0x2a */ 771 /* ask for mode page 0x2a */
819 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 772 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
820 SR_TIMEOUT, 3, &data); 773 SR_TIMEOUT, 3, &data, NULL);
821 774
822 if (!scsi_status_is_good(rc)) { 775 if (!scsi_status_is_good(rc)) {
823 /* failed, drive doesn't have capabilities mode page */ 776 /* failed, drive doesn't have capabilities mode page */
@@ -825,7 +778,6 @@ static void get_capabilities(struct scsi_cd *cd)
825 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | 778 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
826 CDC_DVD | CDC_DVD_RAM | 779 CDC_DVD | CDC_DVD_RAM |
827 CDC_SELECT_DISC | CDC_SELECT_SPEED); 780 CDC_SELECT_DISC | CDC_SELECT_SPEED);
828 scsi_release_request(SRpnt);
829 kfree(buffer); 781 kfree(buffer);
830 printk("%s: scsi-1 drive\n", cd->cdi.name); 782 printk("%s: scsi-1 drive\n", cd->cdi.name);
831 return; 783 return;
@@ -885,7 +837,6 @@ static void get_capabilities(struct scsi_cd *cd)
885 cd->device->writeable = 1; 837 cd->device->writeable = 1;
886 } 838 }
887 839
888 scsi_release_request(SRpnt);
889 kfree(buffer); 840 kfree(buffer);
890} 841}
891 842
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 0b3178007203..d2bcd99c272f 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -33,7 +33,6 @@ typedef struct scsi_cd {
33 struct scsi_device *device; 33 struct scsi_device *device;
34 unsigned int vendor; /* vendor code, see sr_vendor.c */ 34 unsigned int vendor; /* vendor code, see sr_vendor.c */
35 unsigned long ms_offset; /* for reading multisession-CD's */ 35 unsigned long ms_offset; /* for reading multisession-CD's */
36 unsigned needs_sector_size:1; /* needs to get sector size */
37 unsigned use:1; /* is this device still supportable */ 36 unsigned use:1; /* is this device still supportable */
38 unsigned xa_flag:1; /* CD has XA sectors ? */ 37 unsigned xa_flag:1; /* CD has XA sectors ? */
39 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ 38 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 82d68fdb1548..6e45ac3c43c5 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -17,7 +17,7 @@
17#include <scsi/scsi_eh.h> 17#include <scsi/scsi_eh.h>
18#include <scsi/scsi_host.h> 18#include <scsi/scsi_host.h>
19#include <scsi/scsi_ioctl.h> 19#include <scsi/scsi_ioctl.h>
20#include <scsi/scsi_request.h> 20#include <scsi/scsi_cmnd.h>
21 21
22#include "sr.h" 22#include "sr.h"
23 23
@@ -84,41 +84,37 @@ static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti
84 84
85int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) 85int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
86{ 86{
87 struct scsi_request *SRpnt;
88 struct scsi_device *SDev; 87 struct scsi_device *SDev;
89 struct request *req; 88 struct scsi_sense_hdr sshdr;
90 int result, err = 0, retries = 0; 89 int result, err = 0, retries = 0;
90 struct request_sense *sense = cgc->sense;
91 91
92 SDev = cd->device; 92 SDev = cd->device;
93 SRpnt = scsi_allocate_request(SDev, GFP_KERNEL); 93
94 if (!SRpnt) { 94 if (!sense) {
95 printk(KERN_ERR "Unable to allocate SCSI request in sr_do_ioctl"); 95 sense = kmalloc(sizeof(*sense), GFP_KERNEL);
96 err = -ENOMEM; 96 if (!sense) {
97 goto out; 97 err = -ENOMEM;
98 } 98 goto out;
99 SRpnt->sr_data_direction = cgc->data_direction; 99 }
100 }
100 101
101 retry: 102 retry:
102 if (!scsi_block_when_processing_errors(SDev)) { 103 if (!scsi_block_when_processing_errors(SDev)) {
103 err = -ENODEV; 104 err = -ENODEV;
104 goto out_free; 105 goto out;
105 } 106 }
106 107
107 scsi_wait_req(SRpnt, cgc->cmd, cgc->buffer, cgc->buflen, 108 memset(sense, 0, sizeof(*sense));
108 cgc->timeout, IOCTL_RETRIES); 109 result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
109 110 cgc->buffer, cgc->buflen, (char *)sense,
110 req = SRpnt->sr_request; 111 cgc->timeout, IOCTL_RETRIES, 0);
111 if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
112 memcpy(req->buffer, SRpnt->sr_buffer, SRpnt->sr_bufflen);
113 kfree(SRpnt->sr_buffer);
114 SRpnt->sr_buffer = req->buffer;
115 }
116 112
117 result = SRpnt->sr_result; 113 scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
118 114
119 /* Minimal error checking. Ignore cases we know about, and report the rest. */ 115 /* Minimal error checking. Ignore cases we know about, and report the rest. */
120 if (driver_byte(result) != 0) { 116 if (driver_byte(result) != 0) {
121 switch (SRpnt->sr_sense_buffer[2] & 0xf) { 117 switch (sshdr.sense_key) {
122 case UNIT_ATTENTION: 118 case UNIT_ATTENTION:
123 SDev->changed = 1; 119 SDev->changed = 1;
124 if (!cgc->quiet) 120 if (!cgc->quiet)
@@ -128,8 +124,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
128 err = -ENOMEDIUM; 124 err = -ENOMEDIUM;
129 break; 125 break;
130 case NOT_READY: /* This happens if there is no disc in drive */ 126 case NOT_READY: /* This happens if there is no disc in drive */
131 if (SRpnt->sr_sense_buffer[12] == 0x04 && 127 if (sshdr.asc == 0x04 &&
132 SRpnt->sr_sense_buffer[13] == 0x01) { 128 sshdr.ascq == 0x01) {
133 /* sense: Logical unit is in process of becoming ready */ 129 /* sense: Logical unit is in process of becoming ready */
134 if (!cgc->quiet) 130 if (!cgc->quiet)
135 printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name); 131 printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name);
@@ -146,37 +142,33 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
146 if (!cgc->quiet) 142 if (!cgc->quiet)
147 printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name); 143 printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name);
148#ifdef DEBUG 144#ifdef DEBUG
149 scsi_print_req_sense("sr", SRpnt); 145 scsi_print_sense_hdr("sr", &sshdr);
150#endif 146#endif
151 err = -ENOMEDIUM; 147 err = -ENOMEDIUM;
152 break; 148 break;
153 case ILLEGAL_REQUEST: 149 case ILLEGAL_REQUEST:
154 err = -EIO; 150 err = -EIO;
155 if (SRpnt->sr_sense_buffer[12] == 0x20 && 151 if (sshdr.asc == 0x20 &&
156 SRpnt->sr_sense_buffer[13] == 0x00) 152 sshdr.ascq == 0x00)
157 /* sense: Invalid command operation code */ 153 /* sense: Invalid command operation code */
158 err = -EDRIVE_CANT_DO_THIS; 154 err = -EDRIVE_CANT_DO_THIS;
159#ifdef DEBUG 155#ifdef DEBUG
160 __scsi_print_command(cgc->cmd); 156 __scsi_print_command(cgc->cmd);
161 scsi_print_req_sense("sr", SRpnt); 157 scsi_print_sense_hdr("sr", &sshdr);
162#endif 158#endif
163 break; 159 break;
164 default: 160 default:
165 printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name); 161 printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name);
166 __scsi_print_command(cgc->cmd); 162 __scsi_print_command(cgc->cmd);
167 scsi_print_req_sense("sr", SRpnt); 163 scsi_print_sense_hdr("sr", &sshdr);
168 err = -EIO; 164 err = -EIO;
169 } 165 }
170 } 166 }
171 167
172 if (cgc->sense)
173 memcpy(cgc->sense, SRpnt->sr_sense_buffer, sizeof(*cgc->sense));
174
175 /* Wake up a process waiting for device */ 168 /* Wake up a process waiting for device */
176 out_free:
177 scsi_release_request(SRpnt);
178 SRpnt = NULL;
179 out: 169 out:
170 if (!cgc->sense)
171 kfree(sense);
180 cgc->stat = err; 172 cgc->stat = err;
181 return err; 173 return err;
182} 174}
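
sr_do_ioctl() now uses scsi_execute(), the lower-level variant that accepts a raw sense buffer rather than a scsi_sense_hdr, so the caller-supplied request_sense can be filled in directly; scsi_normalize_sense() then decodes those bytes into sense_key/asc/ascq for the error switch. A minimal sketch of that pairing (the helper name is hypothetical; the calls mirror the hunk above):

	/* Sketch of the issue-and-decode pattern used in sr_do_ioctl(). */
	static int example_issue_packet(struct scsi_device *SDev,
					struct packet_command *cgc,
					struct request_sense *sense)
	{
		struct scsi_sense_hdr sshdr;
		int result;

		memset(sense, 0, sizeof(*sense));
		result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
				      cgc->buffer, cgc->buflen, (char *)sense,
				      cgc->timeout, IOCTL_RETRIES, 0);

		/* Turn the raw sense bytes into a normalized header before
		 * inspecting them. */
		scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
		if (driver_byte(result) && scsi_sense_valid(&sshdr) &&
		    sshdr.sense_key == UNIT_ATTENTION)
			SDev->changed = 1;	/* media may have been swapped */

		return result;
	}
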
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 0a7839db5752..a93308ae9736 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static char *verstr = "20050501"; 20static char *verstr = "20050830";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -219,6 +219,12 @@ static int switch_partition(struct scsi_tape *);
219 219
220static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long); 220static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long);
221 221
222static void scsi_tape_release(struct kref *);
223
224#define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
225
226static DECLARE_MUTEX(st_ref_sem);
227
222 228
223#include "osst_detect.h" 229#include "osst_detect.h"
224#ifndef SIGS_FROM_OSST 230#ifndef SIGS_FROM_OSST
@@ -230,6 +236,46 @@ static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long);
230 {"OnStream", "FW-", "", "osst"} 236 {"OnStream", "FW-", "", "osst"}
231#endif 237#endif
232 238
239static struct scsi_tape *scsi_tape_get(int dev)
240{
241 struct scsi_tape *STp = NULL;
242
243 down(&st_ref_sem);
244 write_lock(&st_dev_arr_lock);
245
246 if (dev < st_dev_max && scsi_tapes != NULL)
247 STp = scsi_tapes[dev];
248 if (!STp) goto out;
249
250 kref_get(&STp->kref);
251
252 if (!STp->device)
253 goto out_put;
254
255 if (scsi_device_get(STp->device))
256 goto out_put;
257
258 goto out;
259
260out_put:
261 kref_put(&STp->kref, scsi_tape_release);
262 STp = NULL;
263out:
264 write_unlock(&st_dev_arr_lock);
265 up(&st_ref_sem);
266 return STp;
267}
268
269static void scsi_tape_put(struct scsi_tape *STp)
270{
271 struct scsi_device *sdev = STp->device;
272
273 down(&st_ref_sem);
274 kref_put(&STp->kref, scsi_tape_release);
275 scsi_device_put(sdev);
276 up(&st_ref_sem);
277}
278
233struct st_reject_data { 279struct st_reject_data {
234 char *vendor; 280 char *vendor;
235 char *model; 281 char *model;
@@ -311,7 +357,7 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
311 return 0; 357 return 0;
312 358
313 cmdstatp = &STp->buffer->cmdstat; 359 cmdstatp = &STp->buffer->cmdstat;
314 st_analyze_sense(STp->buffer->last_SRpnt, cmdstatp); 360 st_analyze_sense(SRpnt, cmdstatp);
315 361
316 if (cmdstatp->have_sense) 362 if (cmdstatp->have_sense)
317 scode = STp->buffer->cmdstat.sense_hdr.sense_key; 363 scode = STp->buffer->cmdstat.sense_hdr.sense_key;
@@ -399,10 +445,10 @@ static void st_sleep_done(struct scsi_cmnd * SCpnt)
399 445
400 (STp->buffer)->cmdstat.midlevel_result = SCpnt->result; 446 (STp->buffer)->cmdstat.midlevel_result = SCpnt->result;
401 SCpnt->request->rq_status = RQ_SCSI_DONE; 447 SCpnt->request->rq_status = RQ_SCSI_DONE;
402 (STp->buffer)->last_SRpnt = SCpnt->sc_request;
403 DEB( STp->write_pending = 0; ) 448 DEB( STp->write_pending = 0; )
404 449
405 complete(SCpnt->request->waiting); 450 if (SCpnt->request->waiting)
451 complete(SCpnt->request->waiting);
406} 452}
407 453
408/* Do the scsi command. Waits until command performed if do_wait is true. 454/* Do the scsi command. Waits until command performed if do_wait is true.
@@ -412,8 +458,20 @@ static struct scsi_request *
412st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd, 458st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd,
413 int bytes, int direction, int timeout, int retries, int do_wait) 459 int bytes, int direction, int timeout, int retries, int do_wait)
414{ 460{
461 struct completion *waiting;
415 unsigned char *bp; 462 unsigned char *bp;
416 463
464 /* if async, make sure there's no command outstanding */
465 if (!do_wait && ((STp->buffer)->last_SRpnt)) {
466 printk(KERN_ERR "%s: Async command already active.\n",
467 tape_name(STp));
468 if (signal_pending(current))
469 (STp->buffer)->syscall_result = (-EINTR);
470 else
471 (STp->buffer)->syscall_result = (-EBUSY);
472 return NULL;
473 }
474
417 if (SRpnt == NULL) { 475 if (SRpnt == NULL) {
418 SRpnt = scsi_allocate_request(STp->device, GFP_ATOMIC); 476 SRpnt = scsi_allocate_request(STp->device, GFP_ATOMIC);
419 if (SRpnt == NULL) { 477 if (SRpnt == NULL) {
@@ -427,7 +485,13 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
427 } 485 }
428 } 486 }
429 487
430 init_completion(&STp->wait); 488 /* If async IO, set last_SRpnt. This ptr tells write_behind_check
489 which IO is outstanding. It's nulled out when the IO completes. */
490 if (!do_wait)
491 (STp->buffer)->last_SRpnt = SRpnt;
492
493 waiting = &STp->wait;
494 init_completion(waiting);
431 SRpnt->sr_use_sg = STp->buffer->do_dio || (bytes > (STp->buffer)->frp[0].length); 495 SRpnt->sr_use_sg = STp->buffer->do_dio || (bytes > (STp->buffer)->frp[0].length);
432 if (SRpnt->sr_use_sg) { 496 if (SRpnt->sr_use_sg) {
433 if (!STp->buffer->do_dio) 497 if (!STp->buffer->do_dio)
@@ -438,17 +502,20 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
438 bp = (STp->buffer)->b_data; 502 bp = (STp->buffer)->b_data;
439 SRpnt->sr_data_direction = direction; 503 SRpnt->sr_data_direction = direction;
440 SRpnt->sr_cmd_len = 0; 504 SRpnt->sr_cmd_len = 0;
441 SRpnt->sr_request->waiting = &(STp->wait); 505 SRpnt->sr_request->waiting = waiting;
442 SRpnt->sr_request->rq_status = RQ_SCSI_BUSY; 506 SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
443 SRpnt->sr_request->rq_disk = STp->disk; 507 SRpnt->sr_request->rq_disk = STp->disk;
508 SRpnt->sr_request->end_io = blk_end_sync_rq;
444 STp->buffer->cmdstat.have_sense = 0; 509 STp->buffer->cmdstat.have_sense = 0;
445 510
446 scsi_do_req(SRpnt, (void *) cmd, bp, bytes, 511 scsi_do_req(SRpnt, (void *) cmd, bp, bytes,
447 st_sleep_done, timeout, retries); 512 st_sleep_done, timeout, retries);
448 513
449 if (do_wait) { 514 if (do_wait) {
450 wait_for_completion(SRpnt->sr_request->waiting); 515 wait_for_completion(waiting);
451 SRpnt->sr_request->waiting = NULL; 516 SRpnt->sr_request->waiting = NULL;
517 if (SRpnt->sr_request->rq_status != RQ_SCSI_DONE)
518 SRpnt->sr_result |= (DRIVER_ERROR << 24);
452 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); 519 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
453 } 520 }
454 return SRpnt; 521 return SRpnt;
@@ -465,6 +532,7 @@ static int write_behind_check(struct scsi_tape * STp)
465 struct st_buffer *STbuffer; 532 struct st_buffer *STbuffer;
466 struct st_partstat *STps; 533 struct st_partstat *STps;
467 struct st_cmdstatus *cmdstatp; 534 struct st_cmdstatus *cmdstatp;
535 struct scsi_request *SRpnt;
468 536
469 STbuffer = STp->buffer; 537 STbuffer = STp->buffer;
470 if (!STbuffer->writing) 538 if (!STbuffer->writing)
@@ -478,10 +546,14 @@ static int write_behind_check(struct scsi_tape * STp)
478 ) /* end DEB */ 546 ) /* end DEB */
479 547
480 wait_for_completion(&(STp->wait)); 548 wait_for_completion(&(STp->wait));
481 (STp->buffer)->last_SRpnt->sr_request->waiting = NULL; 549 SRpnt = STbuffer->last_SRpnt;
550 STbuffer->last_SRpnt = NULL;
551 SRpnt->sr_request->waiting = NULL;
552 if (SRpnt->sr_request->rq_status != RQ_SCSI_DONE)
553 SRpnt->sr_result |= (DRIVER_ERROR << 24);
482 554
483 (STp->buffer)->syscall_result = st_chk_result(STp, (STp->buffer)->last_SRpnt); 555 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
484 scsi_release_request((STp->buffer)->last_SRpnt); 556 scsi_release_request(SRpnt);
485 557
486 STbuffer->buffer_bytes -= STbuffer->writing; 558 STbuffer->buffer_bytes -= STbuffer->writing;
487 STps = &(STp->ps[STp->partition]); 559 STps = &(STp->ps[STp->partition]);
@@ -1055,25 +1127,20 @@ static int st_open(struct inode *inode, struct file *filp)
1055 */ 1127 */
1056 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); 1128 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
1057 1129
1130 if (!(STp = scsi_tape_get(dev)))
1131 return -ENXIO;
1132
1058 write_lock(&st_dev_arr_lock); 1133 write_lock(&st_dev_arr_lock);
1059 if (dev >= st_dev_max || scsi_tapes == NULL ||
1060 ((STp = scsi_tapes[dev]) == NULL)) {
1061 write_unlock(&st_dev_arr_lock);
1062 return (-ENXIO);
1063 }
1064 filp->private_data = STp; 1134 filp->private_data = STp;
1065 name = tape_name(STp); 1135 name = tape_name(STp);
1066 1136
1067 if (STp->in_use) { 1137 if (STp->in_use) {
1068 write_unlock(&st_dev_arr_lock); 1138 write_unlock(&st_dev_arr_lock);
1139 scsi_tape_put(STp);
1069 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); ) 1140 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
1070 return (-EBUSY); 1141 return (-EBUSY);
1071 } 1142 }
1072 1143
1073 if(scsi_device_get(STp->device)) {
1074 write_unlock(&st_dev_arr_lock);
1075 return (-ENXIO);
1076 }
1077 STp->in_use = 1; 1144 STp->in_use = 1;
1078 write_unlock(&st_dev_arr_lock); 1145 write_unlock(&st_dev_arr_lock);
1079 STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0; 1146 STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
@@ -1118,7 +1185,7 @@ static int st_open(struct inode *inode, struct file *filp)
1118 err_out: 1185 err_out:
1119 normalize_buffer(STp->buffer); 1186 normalize_buffer(STp->buffer);
1120 STp->in_use = 0; 1187 STp->in_use = 0;
1121 scsi_device_put(STp->device); 1188 scsi_tape_put(STp);
1122 return retval; 1189 return retval;
1123 1190
1124} 1191}
@@ -1250,7 +1317,7 @@ static int st_release(struct inode *inode, struct file *filp)
1250 write_lock(&st_dev_arr_lock); 1317 write_lock(&st_dev_arr_lock);
1251 STp->in_use = 0; 1318 STp->in_use = 0;
1252 write_unlock(&st_dev_arr_lock); 1319 write_unlock(&st_dev_arr_lock);
1253 scsi_device_put(STp->device); 1320 scsi_tape_put(STp);
1254 1321
1255 return result; 1322 return result;
1256} 1323}
@@ -3887,6 +3954,7 @@ static int st_probe(struct device *dev)
3887 goto out_put_disk; 3954 goto out_put_disk;
3888 } 3955 }
3889 memset(tpnt, 0, sizeof(struct scsi_tape)); 3956 memset(tpnt, 0, sizeof(struct scsi_tape));
3957 kref_init(&tpnt->kref);
3890 tpnt->disk = disk; 3958 tpnt->disk = disk;
3891 sprintf(disk->disk_name, "st%d", i); 3959 sprintf(disk->disk_name, "st%d", i);
3892 disk->private_data = &tpnt->driver; 3960 disk->private_data = &tpnt->driver;
@@ -3902,6 +3970,7 @@ static int st_probe(struct device *dev)
3902 tpnt->tape_type = MT_ISSCSI2; 3970 tpnt->tape_type = MT_ISSCSI2;
3903 3971
3904 tpnt->buffer = buffer; 3972 tpnt->buffer = buffer;
3973 tpnt->buffer->last_SRpnt = NULL;
3905 3974
3906 tpnt->inited = 0; 3975 tpnt->inited = 0;
3907 tpnt->dirty = 0; 3976 tpnt->dirty = 0;
@@ -4076,15 +4145,10 @@ static int st_remove(struct device *dev)
4076 tpnt->modes[mode].cdevs[j] = NULL; 4145 tpnt->modes[mode].cdevs[j] = NULL;
4077 } 4146 }
4078 } 4147 }
4079 tpnt->device = NULL;
4080 4148
4081 if (tpnt->buffer) { 4149 down(&st_ref_sem);
4082 tpnt->buffer->orig_frp_segs = 0; 4150 kref_put(&tpnt->kref, scsi_tape_release);
4083 normalize_buffer(tpnt->buffer); 4151 up(&st_ref_sem);
4084 kfree(tpnt->buffer);
4085 }
4086 put_disk(tpnt->disk);
4087 kfree(tpnt);
4088 return 0; 4152 return 0;
4089 } 4153 }
4090 } 4154 }
@@ -4093,6 +4157,34 @@ static int st_remove(struct device *dev)
4093 return 0; 4157 return 0;
4094} 4158}
4095 4159
4160/**
4161 * scsi_tape_release - Called to free the Scsi_Tape structure
4162 * @kref: pointer to embedded kref
4163 *
4164 * st_ref_sem must be held entering this routine. Because it is
4165 * called on last put, you should always use the scsi_tape_get()
4166 * scsi_tape_put() helpers which manipulate the semaphore directly
4167 * and never do a direct kref_put().
4168 **/
4169static void scsi_tape_release(struct kref *kref)
4170{
4171 struct scsi_tape *tpnt = to_scsi_tape(kref);
4172 struct gendisk *disk = tpnt->disk;
4173
4174 tpnt->device = NULL;
4175
4176 if (tpnt->buffer) {
4177 tpnt->buffer->orig_frp_segs = 0;
4178 normalize_buffer(tpnt->buffer);
4179 kfree(tpnt->buffer);
4180 }
4181
4182 disk->private_data = NULL;
4183 put_disk(disk);
4184 kfree(tpnt);
4185 return;
4186}
4187
4096static void st_intr(struct scsi_cmnd *SCpnt) 4188static void st_intr(struct scsi_cmnd *SCpnt)
4097{ 4189{
4098 scsi_io_completion(SCpnt, (SCpnt->result ? 0: SCpnt->bufflen), 1); 4190 scsi_io_completion(SCpnt, (SCpnt->result ? 0: SCpnt->bufflen), 1);
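
The kernel-doc above states the lifetime rule this series introduces: both the lookup in scsi_tape_get() and the final put run under st_ref_sem, so the last kref_put() (which frees the scsi_tape) cannot race a lookup that is still dereferencing scsi_tapes[dev]. The st_open() and st_release() hunks earlier in this file are the intended call sites; an illustrative condensation of that pattern (the function name is made up for the sketch):

	/* Illustrative only: mirrors how st_open()/st_release() use the helpers. */
	static int st_use_tape_sketch(int dev)
	{
		struct scsi_tape *STp;

		/* kref_get + scsi_device_get, both taken under st_ref_sem */
		STp = scsi_tape_get(dev);
		if (!STp)
			return -ENXIO;

		/* ... issue commands via STp->device ... */

		/* Drop the reference through the helper, never a bare kref_put(),
		 * so the final put (which may call scsi_tape_release()) stays
		 * serialized against concurrent lookups. */
		scsi_tape_put(STp);
		return 0;
	}
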
@@ -4348,12 +4440,12 @@ static int st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pag
4348static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 4440static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
4349 unsigned long uaddr, size_t count, int rw) 4441 unsigned long uaddr, size_t count, int rw)
4350{ 4442{
4443 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
4444 unsigned long start = uaddr >> PAGE_SHIFT;
4445 const int nr_pages = end - start;
4351 int res, i, j; 4446 int res, i, j;
4352 unsigned int nr_pages;
4353 struct page **pages; 4447 struct page **pages;
4354 4448
4355 nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
4356
4357 /* User attempted Overflow! */ 4449 /* User attempted Overflow! */
4358 if ((uaddr + count) < uaddr) 4450 if ((uaddr + count) < uaddr)
4359 return -EINVAL; 4451 return -EINVAL;
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 061da111398e..790acac160bc 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -3,7 +3,7 @@
3#define _ST_H 3#define _ST_H
4 4
5#include <linux/completion.h> 5#include <linux/completion.h>
6 6#include <linux/kref.h>
7 7
8/* Descriptor for analyzed sense data */ 8/* Descriptor for analyzed sense data */
9struct st_cmdstatus { 9struct st_cmdstatus {
@@ -156,6 +156,7 @@ struct scsi_tape {
156 unsigned char last_sense[16]; 156 unsigned char last_sense[16];
157#endif 157#endif
158 struct gendisk *disk; 158 struct gendisk *disk;
159 struct kref kref;
159}; 160};
160 161
161/* Bit masks for use_pf */ 162/* Bit masks for use_pf */