author	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 21:57:35 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 21:57:35 -0400
commit	97d41e90fe61399b99d74820cb7f2d6e0fbac91d (patch)
tree	f759371424a26963b04badbb4433e360be4e8750 /drivers
parent	3bdc9d0b408e01c4e556daba0035ba37f603e920 (diff)
parent	afaf5a2d341d33b66b47c2716a263ce593460a08 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (54 commits)
  [SCSI] Initial Commit of qla4xxx
  [SCSI] raid class: handle component-add errors
  [SCSI] SCSI megaraid_sas: handle thrown errors
  [SCSI] SCSI aic94xx: handle sysfs errors
  [SCSI] SCSI st: fix error handling in module init, sysfs
  [SCSI] SCSI sd: fix module init/exit error handling
  [SCSI] SCSI osst: add error handling to module init, sysfs
  [SCSI] scsi: remove hosts.h
  [SCSI] scsi: Scsi_Cmnd convertion in aic7xxx_old.c
  [SCSI] megaraid_sas: sets ioctl timeout and updates version,changelog
  [SCSI] megaraid_sas: adds tasklet for cmd completion
  [SCSI] megaraid_sas: prints pending cmds before setting hw_crit_error
  [SCSI] megaraid_sas: function pointer for disable interrupt
  [SCSI] megaraid_sas: frame count optimization
  [SCSI] megaraid_sas: FW transition and q size changes
  [SCSI] qla2xxx: Update version number to 8.01.07-k2.
  [SCSI] qla2xxx: Stall mid-layer error handlers while rport is blocked.
  [SCSI] qla2xxx: Add MODULE_FIRMWARE tags.
  [SCSI] qla2xxx: Add support for host port state FC transport attribute.
  [SCSI] qla2xxx: Add support for fabric name FC transport attribute.
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/message/fusion/linux_compat.h  9
-rw-r--r--  drivers/scsi/3w-9xxx.c  2
-rw-r--r--  drivers/scsi/3w-xxxx.c  2
-rw-r--r--  drivers/scsi/3w-xxxx.h  2
-rw-r--r--  drivers/scsi/Kconfig  7
-rw-r--r--  drivers/scsi/Makefile  1
-rw-r--r--  drivers/scsi/a100u2w.c  2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_inline.h  3
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c  2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_inline.h  3
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c  3
-rw-r--r--  drivers/scsi/aic7xxx_old.c  298
-rw-r--r--  drivers/scsi/aic94xx/Kconfig  1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c  41
-rw-r--r--  drivers/scsi/arm/acornscsi.c  48
-rw-r--r--  drivers/scsi/arm/acornscsi.h  4
-rw-r--r--  drivers/scsi/arm/fas216.c  50
-rw-r--r--  drivers/scsi/arm/fas216.h  36
-rw-r--r--  drivers/scsi/arm/queue.c  37
-rw-r--r--  drivers/scsi/arm/queue.h  28
-rw-r--r--  drivers/scsi/arm/scsi.h  2
-rw-r--r--  drivers/scsi/dc395x.c  4
-rw-r--r--  drivers/scsi/dmx3191d.c  2
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h  10
-rw-r--r--  drivers/scsi/dpt_i2o.c  2
-rw-r--r--  drivers/scsi/dpti.h  2
-rw-r--r--  drivers/scsi/gdth.h  10
-rw-r--r--  drivers/scsi/hosts.h  2
-rw-r--r--  drivers/scsi/ipr.c  687
-rw-r--r--  drivers/scsi/ipr.h  18
-rw-r--r--  drivers/scsi/ips.c  91
-rw-r--r--  drivers/scsi/ips.h  16
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  6
-rw-r--r--  drivers/scsi/megaraid/mega_common.h  2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c  366
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h  22
-rw-r--r--  drivers/scsi/nsp32.c  2
-rw-r--r--  drivers/scsi/nsp32.h  42
-rw-r--r--  drivers/scsi/osst.c  134
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c  1
-rw-r--r--  drivers/scsi/qla1280.c  6
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  57
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  40
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  9
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c  228
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  84
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  86
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  48
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  2
-rw-r--r--  drivers/scsi/qla4xxx/Kconfig  7
-rw-r--r--  drivers/scsi/qla4xxx/Makefile  5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.c  197
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.h  55
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h  586
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h  843
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h  78
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c  1340
-rw-r--r--  drivers/scsi/qla4xxx/ql4_inline.h  84
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c  368
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c  797
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c  930
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c  224
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.h  256
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c  1755
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h  13
-rw-r--r--  drivers/scsi/raid_class.c  20
-rw-r--r--  drivers/scsi/scsi.c  2
-rw-r--r--  drivers/scsi/scsi_devinfo.c  16
-rw-r--r--  drivers/scsi/scsi_lib.c  2
-rw-r--r--  drivers/scsi/sd.c  23
-rw-r--r--  drivers/scsi/seagate.c  24
-rw-r--r--  drivers/scsi/seagate.h  19
-rw-r--r--  drivers/scsi/sg.c  53
-rw-r--r--  drivers/scsi/st.c  115
-rw-r--r--  drivers/scsi/stex.c  197
-rw-r--r--  drivers/scsi/tmscsim.c  12
77 files changed, 9852 insertions, 733 deletions
diff --git a/drivers/message/fusion/linux_compat.h b/drivers/message/fusion/linux_compat.h
index 048b5b8610e..bb2bf5aa0b6 100644
--- a/drivers/message/fusion/linux_compat.h
+++ b/drivers/message/fusion/linux_compat.h
@@ -6,13 +6,4 @@
 #include <linux/version.h>
 #include <scsi/scsi_device.h>
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6))
-static int inline scsi_device_online(struct scsi_device *sdev)
-{
-	return sdev->online;
-}
-#endif
-
-
-/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 #endif /* _LINUX_COMPAT_H */
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5a9475e56d0..da173159ced 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2211,7 +2211,7 @@ static int __init twa_init(void)
 {
 	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
 
-	return pci_module_init(&twa_driver);
+	return pci_register_driver(&twa_driver);
 } /* End twa_init() */
 
 /* This function is called on driver exit */
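Several hunks in this merge make the same mechanical change: pci_module_init() was by this point only a compatibility wrapper, and drivers now call pci_register_driver() directly, which returns 0 on success or a negative errno. A minimal sketch of the resulting registration pattern follows; the device IDs and callbacks are invented for illustration and are not taken from the patch.

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical IDs and callbacks, only to make the pattern concrete. */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	/* pci_register_driver() returns 0 or a negative errno; pass it straight up. */
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");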
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f3a5f422a8e..2d4cb6721fa 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2486,7 +2486,7 @@ static int __init tw_init(void)
 {
 	printk(KERN_WARNING "3ware Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
 
-	return pci_module_init(&tw_driver);
+	return pci_register_driver(&tw_driver);
 } /* End tw_init() */
 
 /* This function is called on driver exit */
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 31fe5ea1592..bbd654a2b9b 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -74,7 +74,7 @@ static char *tw_aen_string[] = {
 	[0x00D] = "ERROR: Logical unit deleted: Unit #",
 	[0x00F] = "WARNING: SMART threshold exceeded: Port #",
 	[0x021] = "WARNING: ATA UDMA downgrade: Port #",
-	[0x021] = "WARNING: ATA UDMA upgrade: Port #",
+	[0x022] = "WARNING: ATA UDMA upgrade: Port #",
 	[0x023] = "WARNING: Sector repair occurred: Port #",
 	[0x024] = "ERROR: SBUF integrity check failure",
 	[0x025] = "ERROR: Lost cached write: Port #",
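The 3w-xxxx.h change fixes a duplicated designated initializer: with two [0x021] entries, the second silently overwrites the first and index 0x022 is left NULL. A small stand-alone illustration of that C behaviour (the array here is hypothetical, not from the driver):

#include <stdio.h>

/* Duplicate designated indices are legal C, but the last one wins. */
static const char *msg[4] = {
	[1] = "downgrade",
	[1] = "upgrade",	/* silently replaces "downgrade"; [2] stays NULL */
	[3] = "sector repair",
};

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%d: %s\n", i, msg[i] ? msg[i] : "(null)");
	return 0;
}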
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c6dfb6fa13b..9540eb8efdc 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1016,7 +1016,7 @@ config SCSI_SYM53C8XX_MMIO
 
 config SCSI_IPR
 	tristate "IBM Power Linux RAID adapter support"
-	depends on PCI && SCSI
+	depends on PCI && SCSI && ATA
 	select FW_LOADER
 	---help---
 	  This driver supports the IBM Power Linux family RAID adapters.
@@ -1246,6 +1246,7 @@ config SCSI_QLOGICPTI
 	  module will be called qlogicpti.
 
 source "drivers/scsi/qla2xxx/Kconfig"
+source "drivers/scsi/qla4xxx/Kconfig"
 
 config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
@@ -1262,8 +1263,8 @@ config SCSI_SEAGATE
 	  These are 8-bit SCSI controllers; the ST-01 is also supported by
 	  this driver.  It is explained in section 3.9 of the SCSI-HOWTO,
 	  available from <http://www.tldp.org/docs.html#howto>.  If it
-	  doesn't work out of the box, you may have to change some settings in
-	  <file:drivers/scsi/seagate.h>.
+	  doesn't work out of the box, you may have to change some macros at
+	  compiletime, which are described in <file:drivers/scsi/seagate.c>.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called seagate.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1ef951be7a5..bcca39c3bcb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
 obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
 obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI)	+= qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_SEAGATE)	+= seagate.o
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index d7e9fab54c6..2684150917e 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1187,7 +1187,7 @@ static struct pci_driver inia100_pci_driver = {
 
 static int __init inia100_init(void)
 {
-	return pci_module_init(&inia100_pci_driver);
+	return pci_register_driver(&inia100_pci_driver);
 }
 
 static void __exit inia100_exit(void)
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 8ad3ce945b9..a3266e066c0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -527,7 +527,8 @@ ahd_inw(struct ahd_softc *ahd, u_int port)
 	 * or have other side effects when the low byte is
 	 * read.
 	 */
-	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
+	uint16_t r = ahd_inb(ahd, port+1) << 8;
+	return r | ahd_inb(ahd, port);
 }
 
 static __inline void
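The ahd_inw()/ahc_inw() rewrites above (and in aic7xxx_inline.h below) are about evaluation order: in the old single expression, C does not define whether the high-byte or the low-byte ahd_inb() call runs first, which matters for registers whose high byte latches or has side effects when the low byte is read. Splitting the high-byte read into its own statement forces it to happen first. A stand-alone sketch of the same idea, with a hypothetical reg_read8() accessor standing in for the driver's inb helpers:

#include <stdint.h>

extern uint8_t reg_read8(unsigned int port);	/* hypothetical 8-bit register accessor */

/* Unordered: the compiler may evaluate either operand of '|' first. */
static uint16_t read16_unordered(unsigned int port)
{
	return (uint16_t)((reg_read8(port + 1) << 8) | reg_read8(port));
}

/* Ordered: the statement boundary guarantees the high byte is read first. */
static uint16_t read16_high_first(unsigned int port)
{
	uint16_t hi = (uint16_t)(reg_read8(port + 1) << 8);

	return hi | reg_read8(port);
}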
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 50a41eda580..4b535420180 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -198,7 +198,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 int
 ahd_linux_pci_init(void)
 {
-	return (pci_module_init(&aic79xx_pci_driver));
+	return pci_register_driver(&aic79xx_pci_driver);
 }
 
 void
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index 2cc8a17ed8b..8e1954cdd84 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -300,7 +300,8 @@ ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
 static __inline uint16_t
 ahc_inw(struct ahc_softc *ahc, u_int port)
 {
-	return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port));
+	uint16_t r = ahc_inb(ahc, port+1) << 8;
+	return r | ahc_inb(ahc, port);
 }
 
 static __inline void
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 7e42f07a27f..d20ca514e9f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -246,8 +246,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 int
 ahc_linux_pci_init(void)
 {
-	/* Translate error or zero return into zero or one */
-	return pci_module_init(&aic7xxx_pci_driver) ? 0 : 1;
+	return pci_register_driver(&aic7xxx_pci_driver);
 }
 
 void
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 10353379a07..3eae8062a02 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -780,24 +780,26 @@ typedef enum {
 } ahc_bugs;
 
 struct aic7xxx_scb {
 	struct aic7xxx_hwscb	*hscb;		/* corresponding hardware scb */
-	Scsi_Cmnd		*cmd;		/* Scsi_Cmnd for this scb */
+	struct scsi_cmnd	*cmd;		/* scsi_cmnd for this scb */
 	struct aic7xxx_scb	*q_next;	/* next scb in queue */
 	volatile scb_flag_type	 flags;		/* current state of scb */
 	struct hw_scatterlist	*sg_list;	/* SG list in adapter format */
 	unsigned char		 tag_action;
 	unsigned char		 sg_count;
 	unsigned char		*sense_cmd;	/*
 						 * Allocate 6 characters for
 						 * sense command.
 						 */
 	unsigned char		*cmnd;
-	unsigned int		 sg_length;	/* We init this during buildscb so we
-						 * don't have to calculate anything
-						 * during underflow/overflow/stat code
-						 */
-	void			*kmalloc_ptr;
-	struct aic7xxx_scb_dma	*scb_dma;
+	unsigned int		 sg_length;	/*
+						 * We init this during
+						 * buildscb so we don't have
+						 * to calculate anything during
+						 * underflow/overflow/stat code
+						 */
+	void			*kmalloc_ptr;
+	struct aic7xxx_scb_dma	*scb_dma;
 };
 
 /*
@@ -918,79 +920,77 @@ struct aic7xxx_host {
  * We are grouping things here....first, items that get either read or
  * written with nearly every interrupt
  */
 	volatile long		flags;
 	ahc_feature		features;	/* chip features */
 	unsigned long		base;		/* card base address */
 	volatile unsigned char	__iomem *maddr;	/* memory mapped address */
 	unsigned long		isr_count;	/* Interrupt count */
 	unsigned long		spurious_int;
 	scb_data_type		*scb_data;
 	struct aic7xxx_cmd_queue {
-		Scsi_Cmnd *head;
-		Scsi_Cmnd *tail;
+		struct scsi_cmnd *head;
+		struct scsi_cmnd *tail;
 	} completeq;
 
 	/*
 	 * Things read/written on nearly every entry into aic7xxx_queue()
 	 */
 	volatile scb_queue_type	waiting_scbs;
 	unsigned char		unpause;	/* unpause value for HCNTRL */
 	unsigned char		pause;		/* pause value for HCNTRL */
 	volatile unsigned char	qoutfifonext;
 	volatile unsigned char	activescbs;	/* active scbs */
 	volatile unsigned char	max_activescbs;
 	volatile unsigned char	qinfifonext;
 	volatile unsigned char	*untagged_scbs;
 	volatile unsigned char	*qoutfifo;
 	volatile unsigned char	*qinfifo;
 
 	unsigned char		dev_last_queue_full[MAX_TARGETS];
 	unsigned char		dev_last_queue_full_count[MAX_TARGETS];
-	unsigned short		ultraenb;	/* Gets downloaded to card as a
-						   bitmap */
-	unsigned short		discenable;	/* Gets downloaded to card as a
-						   bitmap */
-	transinfo_type		user[MAX_TARGETS];
-
-	unsigned char		msg_buf[13];	/* The message for the target */
-	unsigned char		msg_type;
+	unsigned short		ultraenb;	/* Gets downloaded to card as a bitmap */
+	unsigned short		discenable;	/* Gets downloaded to card as a bitmap */
+	transinfo_type		user[MAX_TARGETS];
+
+	unsigned char		msg_buf[13];	/* The message for the target */
+	unsigned char		msg_type;
 #define MSG_TYPE_NONE              0x00
 #define MSG_TYPE_INITIATOR_MSGOUT  0x01
 #define MSG_TYPE_INITIATOR_MSGIN   0x02
 	unsigned char		msg_len;	/* Length of message */
 	unsigned char		msg_index;	/* Index into msg_buf array */
 
 
 	/*
-	 * We put the less frequently used host structure items after the more
-	 * frequently used items to try and ease the burden on the cache subsystem.
-	 * These entries are not *commonly* accessed, whereas the preceding entries
-	 * are accessed very often.
-	 */
-
-	unsigned int		irq;		/* IRQ for this adapter */
-	int			instance;	/* aic7xxx instance number */
-	int			scsi_id;	/* host adapter SCSI ID */
-	int			scsi_id_b;	/* channel B for twin adapters */
-	unsigned int		bios_address;
-	int			board_name_index;
-	unsigned short		bios_control;		/* bios control - SEEPROM */
-	unsigned short		adapter_control;	/* adapter control - SEEPROM */
-	struct pci_dev		*pdev;
-	unsigned char		pci_bus;
-	unsigned char		pci_device_fn;
-	struct seeprom_config	sc;
-	unsigned short		sc_type;
-	unsigned short		sc_size;
-	struct aic7xxx_host	*next;		/* allow for multiple IRQs */
-	struct Scsi_Host	*host;		/* pointer to scsi host */
-	struct list_head	 aic_devs;	/* all aic_dev structs on host */
-	int			host_no;	/* SCSI host number */
-	unsigned long		mbase;		/* I/O memory address */
-	ahc_chip		chip;		/* chip type */
-	ahc_bugs		bugs;
-	dma_addr_t		fifo_dma;	/* DMA handle for fifo arrays */
-
+	 * We put the less frequently used host structure items
+	 * after the more frequently used items to try and ease
+	 * the burden on the cache subsystem.
+	 * These entries are not *commonly* accessed, whereas
+	 * the preceding entries are accessed very often.
+	 */
+
+	unsigned int		irq;		/* IRQ for this adapter */
+	int			instance;	/* aic7xxx instance number */
+	int			scsi_id;	/* host adapter SCSI ID */
+	int			scsi_id_b;	/* channel B for twin adapters */
+	unsigned int		bios_address;
+	int			board_name_index;
+	unsigned short		bios_control;		/* bios control - SEEPROM */
+	unsigned short		adapter_control;	/* adapter control - SEEPROM */
+	struct pci_dev		*pdev;
+	unsigned char		pci_bus;
+	unsigned char		pci_device_fn;
+	struct seeprom_config	sc;
+	unsigned short		sc_type;
+	unsigned short		sc_size;
+	struct aic7xxx_host	*next;		/* allow for multiple IRQs */
+	struct Scsi_Host	*host;		/* pointer to scsi host */
+	struct list_head	 aic_devs;	/* all aic_dev structs on host */
+	int			host_no;	/* SCSI host number */
+	unsigned long		mbase;		/* I/O memory address */
+	ahc_chip		chip;		/* chip type */
+	ahc_bugs		bugs;
+	dma_addr_t		fifo_dma;	/* DMA handle for fifo arrays */
 };
 
 /*
@@ -1271,7 +1271,7 @@ static void aic7xxx_set_syncrate(struct aic7xxx_host *p,
 static void aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel,
 		int lun, unsigned int width, unsigned int type,
 		struct aic_dev_data *aic_dev);
-static void aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd);
+static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd);
 static void aic7xxx_print_card(struct aic7xxx_host *p);
 static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p);
 static void aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded);
@@ -2626,7 +2626,7 @@ aic7xxx_allocate_scb(struct aic7xxx_host *p)
  *   we're finished.  This function queues the completed commands.
  *-F*************************************************************************/
 static void
-aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, struct scsi_cmnd *cmd)
 {
   aic7xxx_position(cmd) = SCB_LIST_NULL;
   cmd->host_scribble = (char *)p->completeq.head;
@@ -2640,18 +2640,16 @@ aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
  * Description:
  *   Process the completed command queue.
  *-F*************************************************************************/
-static void
-aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
+static void aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
 {
-  Scsi_Cmnd *cmd;
-
-  while (p->completeq.head != NULL)
-  {
-    cmd = p->completeq.head;
-    p->completeq.head = (Scsi_Cmnd *)cmd->host_scribble;
-    cmd->host_scribble = NULL;
-    cmd->scsi_done(cmd);
-  }
+	struct scsi_cmnd *cmd;
+
+	while (p->completeq.head != NULL) {
+		cmd = p->completeq.head;
+		p->completeq.head = (struct scsi_Cmnd *) cmd->host_scribble;
+		cmd->host_scribble = NULL;
+		cmd->scsi_done(cmd);
+	}
 }
 
 /*+F*************************************************************************
@@ -2687,11 +2685,11 @@ aic7xxx_free_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
 static void
 aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
 {
-  Scsi_Cmnd *cmd = scb->cmd;
+  struct scsi_cmnd *cmd = scb->cmd;
   struct aic_dev_data *aic_dev = cmd->device->hostdata;
   int tindex = TARGET_INDEX(cmd);
   struct aic7xxx_scb *scbp;
   unsigned char queue_depth;
 
   if (cmd->use_sg > 1)
   {
@@ -2891,7 +2889,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
  * aic7xxx_run_done_queue
  *
  * Description:
- *   Calls the aic7xxx_done() for the Scsi_Cmnd of each scb in the
+ *   Calls the aic7xxx_done() for the scsi_cmnd of each scb in the
  *   aborted list, and adds each scb to the free list.  If complete
  *   is TRUE, we also process the commands complete list.
  *-F*************************************************************************/
@@ -3826,9 +3824,9 @@ aic7xxx_construct_wdtr(struct aic7xxx_host *p, unsigned char bus_width)
 static void
 aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
 {
   struct aic7xxx_hwscb *hscb;
-  Scsi_Cmnd *cmd;
+  struct scsi_cmnd *cmd;
   int actual, i;
 
   cmd = scb->cmd;
   hscb = scb->hscb;
@@ -4219,20 +4217,20 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
 
     case BAD_STATUS:
       {
         unsigned char scb_index;
         struct aic7xxx_hwscb *hscb;
-        Scsi_Cmnd *cmd;
+        struct scsi_cmnd *cmd;
 
        /* The sequencer will notify us when a command has an error that
         * would be of interest to the kernel.  This allows us to leave
         * the sequencer running in the common case of command completes
         * without error.  The sequencer will have DMA'd the SCB back
         * up to us, so we can reference the drivers SCB array.
         *
         * Set the default return value to 0 indicating not to send
         * sense.  The sense code will change this if needed and this
         * reduces code duplication.
         */
         aic_outb(p, 0, RETURN_1);
         scb_index = aic_inb(p, SCB_TAG);
         if (scb_index > p->scb_data->numscbs)
@@ -5800,9 +5798,9 @@ aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
       }
       else if ((status & SELTO) != 0)
       {
         unsigned char scbptr;
         unsigned char nextscb;
-        Scsi_Cmnd *cmd;
+        struct scsi_cmnd *cmd;
 
         scbptr = aic_inb(p, WAITING_SCBH);
         if (scbptr > p->scb_data->maxhscbs)
@@ -5941,11 +5939,11 @@ aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
       /*
        * Determine the bus phase and queue an appropriate message.
        */
       char  *phase;
-      Scsi_Cmnd *cmd;
+      struct scsi_cmnd *cmd;
       unsigned char mesg_out = MSG_NOOP;
       unsigned char lastphase = aic_inb(p, LASTPHASE);
       unsigned char sstat2 = aic_inb(p, SSTAT2);
 
       cmd = scb->cmd;
       switch (lastphase)
@@ -6248,10 +6246,10 @@ aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer)
 static void
 aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p)
 {
   struct aic7xxx_scb *scb = NULL;
   struct aic_dev_data *aic_dev;
-  Scsi_Cmnd *cmd;
+  struct scsi_cmnd *cmd;
   unsigned char scb_index, tindex;
 
 #ifdef AIC7XXX_VERBOSE_DEBUGGING
   if( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) )
@@ -10131,9 +10129,8 @@ skip_pci_controller:
  * Description:
  *   Build a SCB.
  *-F*************************************************************************/
-static void
-aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
-		 struct aic7xxx_scb *scb)
+static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
+			     struct aic7xxx_scb *scb)
 {
   unsigned short mask;
   struct aic7xxx_hwscb *hscb;
@@ -10285,8 +10282,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
  * Description:
  *   Queue a SCB to the controller.
  *-F*************************************************************************/
-static int
-aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
+static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
 {
   struct aic7xxx_host *p;
   struct aic7xxx_scb *scb;
@@ -10319,11 +10315,11 @@ aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
   }
   scb->cmd = cmd;
 
   /*
-   * Make sure the Scsi_Cmnd pointer is saved, the struct it points to
+   * Make sure the scsi_cmnd pointer is saved, the struct it points to
    * is set up properly, and the parity error flag is reset, then send
    * the SCB to the sequencer and watch the fun begin.
    */
   aic7xxx_position(cmd) = scb->hscb->tag;
   cmd->scsi_done = fn;
   cmd->result = DID_OK;
@@ -10356,8 +10352,7 @@ aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
  *   aborted, then we will reset the channel and have all devices renegotiate.
  *   Returns an enumerated type that indicates the status of the operation.
  *-F*************************************************************************/
-static int
-__aic7xxx_bus_device_reset(Scsi_Cmnd *cmd)
+static int __aic7xxx_bus_device_reset(struct scsi_cmnd *cmd)
 {
   struct aic7xxx_host *p;
   struct aic7xxx_scb *scb;
@@ -10550,8 +10545,7 @@ __aic7xxx_bus_device_reset(Scsi_Cmnd *cmd)
   return SUCCESS;
 }
 
-static int
-aic7xxx_bus_device_reset(Scsi_Cmnd *cmd)
+static int aic7xxx_bus_device_reset(struct scsi_cmnd *cmd)
 {
   int rc;
 
@@ -10570,8 +10564,7 @@ aic7xxx_bus_device_reset(Scsi_Cmnd *cmd)
  * Description:
  *   Abort the current SCSI command(s).
  *-F*************************************************************************/
-static void
-aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd)
 {
 
   printk("aic7xxx driver version %s\n", AIC7XXX_C_VERSION);
@@ -10595,8 +10588,7 @@ aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
  * Description:
  *   Abort the current SCSI command(s).
  *-F*************************************************************************/
-static int
-__aic7xxx_abort(Scsi_Cmnd *cmd)
+static int __aic7xxx_abort(struct scsi_cmnd *cmd)
 {
   struct aic7xxx_scb  *scb = NULL;
   struct aic7xxx_host *p;
@@ -10813,8 +10805,7 @@ success:
   return SUCCESS;
 }
 
-static int
-aic7xxx_abort(Scsi_Cmnd *cmd)
+static int aic7xxx_abort(struct scsi_cmnd *cmd)
 {
   int rc;
 
@@ -10836,8 +10827,7 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
  *   DEVICE RESET message - on the offending target before pulling
  *   the SCSI bus reset line.
  *-F*************************************************************************/
-static int
-aic7xxx_reset(Scsi_Cmnd *cmd)
+static int aic7xxx_reset(struct scsi_cmnd *cmd)
 {
   struct aic7xxx_scb *scb;
   struct aic7xxx_host *p;
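Most of the aic7xxx_old.c and drivers/scsi/arm changes in this merge are the same rename: the old Scsi_Cmnd typedef (roughly typedef struct scsi_cmnd Scsi_Cmnd;, kept at the time in drivers/scsi/scsi_typedefs.h) is spelled out as struct scsi_cmnd, so prototypes change shape but not behaviour. A minimal sketch of a queuecommand handler in the new spelling, with this era's two-argument mid-layer interface; the function and the completion value below are illustrative, not taken from the patch:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Old spelling (typedef being phased out):
 *	static int example_queuecommand(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
 * New spelling, the same type underneath:
 */
static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	cmd->result = DID_NO_CONNECT << 16;	/* illustrative result only */
	done(cmd);
	return 0;
}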
diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig
index 0ed391d8ee8..c83fe751d0b 100644
--- a/drivers/scsi/aic94xx/Kconfig
+++ b/drivers/scsi/aic94xx/Kconfig
@@ -28,6 +28,7 @@ config SCSI_AIC94XX
 	tristate "Adaptec AIC94xx SAS/SATA support"
 	depends on PCI
 	select SCSI_SAS_LIBSAS
+	select FW_LOADER
 	help
 	  This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X
 	  AIC94xx chip based host adapters.
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 734adc9d520..99743ca29ca 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -309,11 +309,29 @@ static ssize_t asd_show_dev_pcba_sn(struct device *dev,
 }
 static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
 
-static void asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
+static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
 {
-	device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
-	device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
-	device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+	int err;
+
+	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	if (err)
+		return err;
+
+	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+	if (err)
+		goto err_rev;
+
+	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+	if (err)
+		goto err_biosb;
+
+	return 0;
+
+err_biosb:
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+err_rev:
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	return err;
 }
 
 static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
@@ -645,7 +663,9 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
 	}
 	ASD_DPRINTK("escbs posted\n");
 
-	asd_create_dev_attrs(asd_ha);
+	err = asd_create_dev_attrs(asd_ha);
+	if (err)
+		goto Err_dev_attrs;
 
 	err = asd_register_sas_ha(asd_ha);
 	if (err)
@@ -668,6 +688,7 @@ Err_en_phys:
 	asd_unregister_sas_ha(asd_ha);
 Err_reg_sas:
 	asd_remove_dev_attrs(asd_ha);
+Err_dev_attrs:
 Err_escbs:
 	asd_disable_ints(asd_ha);
 	free_irq(dev->irq, asd_ha);
@@ -754,9 +775,9 @@ static ssize_t asd_version_show(struct device_driver *driver, char *buf)
 }
 static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
 
-static void asd_create_driver_attrs(struct device_driver *driver)
+static int asd_create_driver_attrs(struct device_driver *driver)
 {
-	driver_create_file(driver, &driver_attr_version);
+	return driver_create_file(driver, &driver_attr_version);
 }
 
 static void asd_remove_driver_attrs(struct device_driver *driver)
@@ -834,10 +855,14 @@ static int __init aic94xx_init(void)
 	if (err)
 		goto out_release_transport;
 
-	asd_create_driver_attrs(&aic94xx_pci_driver.driver);
+	err = asd_create_driver_attrs(&aic94xx_pci_driver.driver);
+	if (err)
+		goto out_unregister_pcidrv;
 
 	return err;
 
+ out_unregister_pcidrv:
+	pci_unregister_driver(&aic94xx_pci_driver);
  out_release_transport:
 	sas_release_transport(aic94xx_transport_template);
  out_destroy_caches:
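The new asd_create_dev_attrs() above follows the standard create-then-unwind pattern for sysfs files. An alternative that avoids hand-written rollback is to publish the files as a single attribute group; the sketch below assumes the dev_attr_revision, dev_attr_bios_build and dev_attr_pcba_sn objects already defined in aic94xx_init.c are in scope, and is a related idiom rather than what this patch does:

#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *asd_dev_attrs[] = {
	&dev_attr_revision.attr,
	&dev_attr_bios_build.attr,
	&dev_attr_pcba_sn.attr,
	NULL,
};

static const struct attribute_group asd_dev_attr_group = {
	.attrs = asd_dev_attrs,
};

/* sysfs_create_group() unwinds any files it already created if one fails,
 * so the caller only needs a single error check and a single remove call. */
static int asd_create_dev_attrs_grouped(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &asd_dev_attr_group);
}

static void asd_remove_dev_attrs_grouped(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &asd_dev_attr_group);
}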
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 7621e3fa37b..0525d672e1e 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -194,7 +194,8 @@
 unsigned int sdtr_period = SDTR_PERIOD;
 unsigned int sdtr_size   = SDTR_SIZE;
 
-static void acornscsi_done(AS_Host *host, Scsi_Cmnd **SCpntp, unsigned int result);
+static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
+			   unsigned int result);
 static int acornscsi_reconnect_finish(AS_Host *host);
 static void acornscsi_dma_cleanup(AS_Host *host);
 static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
@@ -712,7 +713,7 @@ static
 intr_ret_t acornscsi_kick(AS_Host *host)
 {
     int from_queue = 0;
-    Scsi_Cmnd *SCpnt;
+    struct scsi_cmnd *SCpnt;
 
     /* first check to see if a command is waiting to be executed */
     SCpnt = host->origSCpnt;
@@ -796,15 +797,15 @@ intr_ret_t acornscsi_kick(AS_Host *host)
 }
 
 /*
- * Function: void acornscsi_done(AS_Host *host, Scsi_Cmnd **SCpntp, unsigned int result)
+ * Function: void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result)
  * Purpose : complete processing for command
  * Params  : host   - interface that completed
  *	     result - driver byte of result
  */
-static
-void acornscsi_done(AS_Host *host, Scsi_Cmnd **SCpntp, unsigned int result)
+static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
+			   unsigned int result)
 {
-    Scsi_Cmnd *SCpnt = *SCpntp;
+    struct scsi_cmnd *SCpnt = *SCpntp;
 
     /* clean up */
     sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
@@ -1318,7 +1319,7 @@ acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int
 static void
 acornscsi_sendcommand(AS_Host *host)
 {
-    Scsi_Cmnd *SCpnt = host->SCpnt;
+    struct scsi_cmnd *SCpnt = host->SCpnt;
 
     sbic_arm_write(host->scsi.io_port, SBIC_TRANSCNTH, 0);
     sbic_arm_writenext(host->scsi.io_port, 0);
@@ -1693,7 +1694,7 @@ void acornscsi_message(AS_Host *host)
 	acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
 	msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
     } else {
-	Scsi_Cmnd *SCpnt = host->SCpnt;
+	struct scsi_cmnd *SCpnt = host->SCpnt;
 
 	acornscsi_dma_cleanup(host);
 
@@ -2509,13 +2510,14 @@ acornscsi_intr(int irq, void *dev_id, struct pt_regs *regs)
  */
 
 /*
- * Function : acornscsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+ * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
  * Purpose  : queues a SCSI command
  * Params   : cmd  - SCSI command
  *	      done - function called on completion, with pointer to command descriptor
  * Returns  : 0, or < 0 on error.
  */
-int acornscsi_queuecmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+int acornscsi_queuecmd(struct scsi_cmnd *SCpnt,
+		       void (*done)(struct scsi_cmnd *))
 {
     AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
 
@@ -2565,17 +2567,18 @@ int acornscsi_queuecmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
 }
 
 /*
- * Prototype: void acornscsi_reportstatus(Scsi_Cmnd **SCpntp1, Scsi_Cmnd **SCpntp2, int result)
+ * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
  * Purpose  : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
  * Params   : SCpntp1 - pointer to command to return
  *	      SCpntp2 - pointer to command to check
  *	      result  - result to pass back to mid-level done function
  * Returns  : *SCpntp2 = NULL if *SCpntp1 is the same command structure as *SCpntp2.
  */
-static inline
-void acornscsi_reportstatus(Scsi_Cmnd **SCpntp1, Scsi_Cmnd **SCpntp2, int result)
+static inline void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1,
+					  struct scsi_cmnd **SCpntp2,
+					  int result)
 {
-    Scsi_Cmnd *SCpnt = *SCpntp1;
+    struct scsi_cmnd *SCpnt = *SCpntp1;
 
     if (SCpnt) {
 	*SCpntp1 = NULL;
@@ -2591,13 +2594,12 @@ void acornscsi_reportstatus(Scsi_Cmnd **SCpntp1, Scsi_Cmnd **SCpntp2, int result
 enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
 
 /*
- * Prototype: enum res acornscsi_do_abort(Scsi_Cmnd *SCpnt)
+ * Prototype: enum res acornscsi_do_abort(struct scsi_cmnd *SCpnt)
  * Purpose  : abort a command on this host
  * Params   : SCpnt - command to abort
  * Returns  : our abort status
  */
-static enum res_abort
-acornscsi_do_abort(AS_Host *host, Scsi_Cmnd *SCpnt)
+static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
 {
 	enum res_abort res = res_not_running;
 
@@ -2684,12 +2686,12 @@ acornscsi_do_abort(AS_Host *host, Scsi_Cmnd *SCpnt)
 }
 
 /*
- * Prototype: int acornscsi_abort(Scsi_Cmnd *SCpnt)
+ * Prototype: int acornscsi_abort(struct scsi_cmnd *SCpnt)
  * Purpose  : abort a command on this host
  * Params   : SCpnt - command to abort
  * Returns  : one of SCSI_ABORT_ macros
  */
-int acornscsi_abort(Scsi_Cmnd *SCpnt)
+int acornscsi_abort(struct scsi_cmnd *SCpnt)
 {
 	AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata;
 	int result;
@@ -2770,16 +2772,16 @@ int acornscsi_abort(Scsi_Cmnd *SCpnt)
 }
 
 /*
- * Prototype: int acornscsi_reset(Scsi_Cmnd *SCpnt, unsigned int reset_flags)
+ * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
  * Purpose  : reset a command on this host/reset this host
  * Params   : SCpnt  - command causing reset
  *	      result - what type of reset to perform
  * Returns  : one of SCSI_RESET_ macros
  */
-int acornscsi_reset(Scsi_Cmnd *SCpnt, unsigned int reset_flags)
+int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
 {
     AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
-    Scsi_Cmnd *SCptr;
+    struct scsi_cmnd *SCptr;
 
     host->stats.resets += 1;
 
diff --git a/drivers/scsi/arm/acornscsi.h b/drivers/scsi/arm/acornscsi.h
index 2142290f840..d11424b89f4 100644
--- a/drivers/scsi/arm/acornscsi.h
+++ b/drivers/scsi/arm/acornscsi.h
@@ -277,8 +277,8 @@ struct status_entry {
 typedef struct acornscsi_hostdata {
     /* miscellaneous */
     struct Scsi_Host	*host;			/* host				*/
-    Scsi_Cmnd		*SCpnt;			/* currently processing command	*/
-    Scsi_Cmnd		*origSCpnt;		/* original connecting command	*/
+    struct scsi_cmnd	*SCpnt;			/* currently processing command	*/
+    struct scsi_cmnd	*origSCpnt;		/* original connecting command	*/
 
     /* driver information */
     struct {
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 4cf7afc31cc..e05f0c2fc91 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -297,8 +297,8 @@ fas216_do_log(FAS216_Info *info, char target, char *fmt, va_list ap)
 	printk("scsi%d.%c: %s", info->host->host_no, target, buf);
 }
 
-static void
-fas216_log_command(FAS216_Info *info, int level, Scsi_Cmnd *SCpnt, char *fmt, ...)
+static void fas216_log_command(FAS216_Info *info, int level,
+			       struct scsi_cmnd *SCpnt, char *fmt, ...)
 {
 	va_list args;
 
@@ -1662,7 +1662,7 @@ irqreturn_t fas216_intr(FAS216_Info *info)
 	return handled;
 }
 
-static void __fas216_start_command(FAS216_Info *info, Scsi_Cmnd *SCpnt)
+static void __fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
 	int tot_msglen;
 
@@ -1754,7 +1754,7 @@ static int parity_test(FAS216_Info *info, int target)
 	return info->device[target].parity_check;
 }
 
-static void fas216_start_command(FAS216_Info *info, Scsi_Cmnd *SCpnt)
+static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
 	int disconnect_ok;
 
@@ -1808,7 +1808,7 @@ static void fas216_start_command(FAS216_Info *info, Scsi_Cmnd *SCpnt)
 	__fas216_start_command(info, SCpnt);
 }
 
-static void fas216_allocate_tag(FAS216_Info *info, Scsi_Cmnd *SCpnt)
+static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
 #ifdef SCSI2_TAG
 	/*
@@ -1842,7 +1842,8 @@ static void fas216_allocate_tag(FAS216_Info *info, Scsi_Cmnd *SCpnt)
 	}
 }
 
-static void fas216_do_bus_device_reset(FAS216_Info *info, Scsi_Cmnd *SCpnt)
+static void fas216_do_bus_device_reset(FAS216_Info *info,
+				       struct scsi_cmnd *SCpnt)
 {
 	struct message *msg;
 
@@ -1890,7 +1891,7 @@ static void fas216_do_bus_device_reset(FAS216_Info *info, Scsi_Cmnd *SCpnt)
  */
 static void fas216_kick(FAS216_Info *info)
 {
-	Scsi_Cmnd *SCpnt = NULL;
+	struct scsi_cmnd *SCpnt = NULL;
 #define TYPE_OTHER	0
 #define TYPE_RESET	1
 #define TYPE_QUEUE	2
@@ -1978,8 +1979,8 @@ static void fas216_kick(FAS216_Info *info)
 /*
  * Clean up from issuing a BUS DEVICE RESET message to a device.
  */
-static void
-fas216_devicereset_done(FAS216_Info *info, Scsi_Cmnd *SCpnt, unsigned int result)
+static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
+				    unsigned int result)
 {
 	fas216_log(info, LOG_ERROR, "fas216 device reset complete");
 
@@ -1996,8 +1997,8 @@ fas216_devicereset_done(FAS216_Info *info, Scsi_Cmnd *SCpnt, unsigned int result
  *
  * Finish processing automatic request sense command
  */
-static void
-fas216_rq_sns_done(FAS216_Info *info, Scsi_Cmnd *SCpnt, unsigned int result)
+static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
+			       unsigned int result)
 {
 	fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
 			"request sense complete, result=0x%04x%02x%02x",
@@ -2030,7 +2031,7 @@ fas216_rq_sns_done(FAS216_Info *info, Scsi_Cmnd *SCpnt, unsigned int result)
  * Finish processing of standard command
  */
 static void
-fas216_std_done(FAS216_Info *info, Scsi_Cmnd *SCpnt, unsigned int result)
+fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
 {
 	info->stats.fins += 1;
 
@@ -2142,8 +2143,8 @@ request_sense:
  */
 static void fas216_done(FAS216_Info *info, unsigned int result)
 {
-	void (*fn)(FAS216_Info *, Scsi_Cmnd *, unsigned int);
-	Scsi_Cmnd *SCpnt;
+	void (*fn)(FAS216_Info *, struct scsi_cmnd *, unsigned int);
+	struct scsi_cmnd *SCpnt;
 	unsigned long flags;
 
 	fas216_checkmagic(info);
@@ -2182,7 +2183,7 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
 	info->device[SCpnt->device->id].parity_check = 0;
 	clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns);
 
-	fn = (void (*)(FAS216_Info *, Scsi_Cmnd *, unsigned int))SCpnt->host_scribble;
+	fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble;
 	fn(info, SCpnt, result);
 
 	if (info->scsi.irq != NO_IRQ) {
@@ -2207,7 +2208,8 @@ no_command:
  * Returns: 0 on success, else error.
  * Notes: io_request_lock is held, interrupts are disabled.
  */
-int fas216_queue_command(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+int fas216_queue_command(struct scsi_cmnd *SCpnt,
+			 void (*done)(struct scsi_cmnd *))
 {
 	FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
 	int result;
@@ -2254,7 +2256,7 @@ int fas216_queue_command(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
  *
  * Trigger restart of a waiting thread in fas216_command
  */
-static void fas216_internal_done(Scsi_Cmnd *SCpnt)
+static void fas216_internal_done(struct scsi_cmnd *SCpnt)
 {
 	FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
 
@@ -2271,7 +2273,8 @@ static void fas216_internal_done(Scsi_Cmnd *SCpnt)
  * Returns: scsi result code.
  * Notes: io_request_lock is held, interrupts are disabled.
  */
-int fas216_noqueue_command(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+int fas216_noqueue_command(struct scsi_cmnd *SCpnt,
+			   void (*done)(struct scsi_cmnd *))
 {
 	FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
 
@@ -2350,7 +2353,8 @@ enum res_find {
  * Decide how to abort a command.
  * Returns: abort status
  */
-static enum res_find fas216_find_command(FAS216_Info *info, Scsi_Cmnd *SCpnt)
+static enum res_find fas216_find_command(FAS216_Info *info,
+					 struct scsi_cmnd *SCpnt)
 {
 	enum res_find res = res_failed;
 
@@ -2417,7 +2421,7 @@ static enum res_find fas216_find_command(FAS216_Info *info, Scsi_Cmnd *SCpnt)
  * Returns: FAILED if unable to abort
  * Notes: io_request_lock is taken, and irqs are disabled
  */
-int fas216_eh_abort(Scsi_Cmnd *SCpnt)
+int fas216_eh_abort(struct scsi_cmnd *SCpnt)
 {
 	FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
 	int result = FAILED;
@@ -2474,7 +2478,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
2474 * Notes: We won't be re-entered, so we'll only have one device 2478 * Notes: We won't be re-entered, so we'll only have one device
2475 * reset on the go at one time. 2479 * reset on the go at one time.
2476 */ 2480 */
2477int fas216_eh_device_reset(Scsi_Cmnd *SCpnt) 2481int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
2478{ 2482{
2479 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; 2483 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
2480 unsigned long flags; 2484 unsigned long flags;
@@ -2555,7 +2559,7 @@ int fas216_eh_device_reset(Scsi_Cmnd *SCpnt)
2555 * Returns: FAILED if unable to reset. 2559 * Returns: FAILED if unable to reset.
2556 * Notes: Further commands are blocked. 2560 * Notes: Further commands are blocked.
2557 */ 2561 */
2558int fas216_eh_bus_reset(Scsi_Cmnd *SCpnt) 2562int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt)
2559{ 2563{
2560 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; 2564 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
2561 unsigned long flags; 2565 unsigned long flags;
@@ -2655,7 +2659,7 @@ static void fas216_init_chip(FAS216_Info *info)
2655 * Returns: FAILED if unable to reset. 2659 * Returns: FAILED if unable to reset.
2656 * Notes: io_request_lock is taken, and irqs are disabled 2660 * Notes: io_request_lock is taken, and irqs are disabled
2657 */ 2661 */
2658int fas216_eh_host_reset(Scsi_Cmnd *SCpnt) 2662int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
2659{ 2663{
2660 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; 2664 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
2661 2665
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 540914d6fd3..00e5f055afd 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -218,11 +218,11 @@ typedef struct {
218 unsigned long magic_start; 218 unsigned long magic_start;
219 spinlock_t host_lock; 219 spinlock_t host_lock;
220 struct Scsi_Host *host; /* host */ 220 struct Scsi_Host *host; /* host */
221 Scsi_Cmnd *SCpnt; /* currently processing command */ 221 struct scsi_cmnd *SCpnt; /* currently processing command */
222 Scsi_Cmnd *origSCpnt; /* original connecting command */ 222 struct scsi_cmnd *origSCpnt; /* original connecting command */
223 Scsi_Cmnd *reqSCpnt; /* request sense command */ 223 struct scsi_cmnd *reqSCpnt; /* request sense command */
224 Scsi_Cmnd *rstSCpnt; /* reset command */ 224 struct scsi_cmnd *rstSCpnt; /* reset command */
225 Scsi_Cmnd *pending_SCpnt[8]; /* per-device pending commands */ 225 struct scsi_cmnd *pending_SCpnt[8]; /* per-device pending commands */
226 int next_pending; /* next pending device */ 226 int next_pending; /* next pending device */
227 227
228 /* 228 /*
@@ -328,21 +328,23 @@ extern int fas216_init (struct Scsi_Host *instance);
328 */ 328 */
329extern int fas216_add (struct Scsi_Host *instance, struct device *dev); 329extern int fas216_add (struct Scsi_Host *instance, struct device *dev);
330 330
331/* Function: int fas216_queue_command (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) 331/* Function: int fas216_queue_command(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
332 * Purpose : queue a command for adapter to process. 332 * Purpose : queue a command for adapter to process.
333 * Params : SCpnt - Command to queue 333 * Params : SCpnt - Command to queue
334 * done - done function to call once command is complete 334 * done - done function to call once command is complete
335 * Returns : 0 - success, else error 335 * Returns : 0 - success, else error
336 */ 336 */
337extern int fas216_queue_command (Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); 337extern int fas216_queue_command(struct scsi_cmnd *,
338 void (*done)(struct scsi_cmnd *));
338 339
 339/* Function: int fas216_noqueue_command (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) 340/* Function: int fas216_noqueue_command(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
340 * Purpose : queue a command for adapter to process, and process it to completion. 341 * Purpose : queue a command for adapter to process, and process it to completion.
341 * Params : SCpnt - Command to queue 342 * Params : SCpnt - Command to queue
342 * done - done function to call once command is complete 343 * done - done function to call once command is complete
343 * Returns : 0 - success, else error 344 * Returns : 0 - success, else error
344 */ 345 */
345extern int fas216_noqueue_command (Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); 346extern int fas216_noqueue_command(struct scsi_cmnd *,
347 void (*done)(struct scsi_cmnd *));
346 348
347/* Function: irqreturn_t fas216_intr (FAS216_Info *info) 349/* Function: irqreturn_t fas216_intr (FAS216_Info *info)
348 * Purpose : handle interrupts from the interface to progress a command 350 * Purpose : handle interrupts from the interface to progress a command
@@ -363,32 +365,32 @@ extern int fas216_print_host(FAS216_Info *info, char *buffer);
363extern int fas216_print_stats(FAS216_Info *info, char *buffer); 365extern int fas216_print_stats(FAS216_Info *info, char *buffer);
364extern int fas216_print_devices(FAS216_Info *info, char *buffer); 366extern int fas216_print_devices(FAS216_Info *info, char *buffer);
365 367
366/* Function: int fas216_eh_abort(Scsi_Cmnd *SCpnt) 368/* Function: int fas216_eh_abort(struct scsi_cmnd *SCpnt)
367 * Purpose : abort this command 369 * Purpose : abort this command
368 * Params : SCpnt - command to abort 370 * Params : SCpnt - command to abort
369 * Returns : FAILED if unable to abort 371 * Returns : FAILED if unable to abort
370 */ 372 */
371extern int fas216_eh_abort(Scsi_Cmnd *SCpnt); 373extern int fas216_eh_abort(struct scsi_cmnd *SCpnt);
372 374
373/* Function: int fas216_eh_device_reset(Scsi_Cmnd *SCpnt) 375/* Function: int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
374 * Purpose : Reset the device associated with this command 376 * Purpose : Reset the device associated with this command
 375 * Params : SCpnt - command specifying device to reset 377 * Params : SCpnt - command specifying device to reset
376 * Returns : FAILED if unable to reset 378 * Returns : FAILED if unable to reset
377 */ 379 */
378extern int fas216_eh_device_reset(Scsi_Cmnd *SCpnt); 380extern int fas216_eh_device_reset(struct scsi_cmnd *SCpnt);
379 381
380/* Function: int fas216_eh_bus_reset(Scsi_Cmnd *SCpnt) 382/* Function: int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt)
381 * Purpose : Reset the complete bus associated with this command 383 * Purpose : Reset the complete bus associated with this command
 382 * Params : SCpnt - command specifying bus to reset 384 * Params : SCpnt - command specifying bus to reset
383 * Returns : FAILED if unable to reset 385 * Returns : FAILED if unable to reset
384 */ 386 */
385extern int fas216_eh_bus_reset(Scsi_Cmnd *SCpnt); 387extern int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt);
386 388
387/* Function: int fas216_eh_host_reset(Scsi_Cmnd *SCpnt) 389/* Function: int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
388 * Purpose : Reset the host associated with this command 390 * Purpose : Reset the host associated with this command
389 * Params : SCpnt - command specifing host to reset 391 * Params : SCpnt - command specifing host to reset
390 * Returns : FAILED if unable to reset 392 * Returns : FAILED if unable to reset
391 */ 393 */
392extern int fas216_eh_host_reset(Scsi_Cmnd *SCpnt); 394extern int fas216_eh_host_reset(struct scsi_cmnd *SCpnt);
393 395
394#endif /* FAS216_H */ 396#endif /* FAS216_H */
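
For context: the fas216 entry points declared above are plugged directly into the scsi_host_template of the ARM card drivers built on this core (cumana_2, eesox, powertec, and friends). A minimal sketch of such a template after the struct scsi_cmnd conversion could look like the following; the myboard_* names and the queueing limits are illustrative placeholders, not taken from this patch.

	#include <linux/module.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_host.h>
	#include "fas216.h"

	static struct scsi_host_template myboard_template = {
		.module				= THIS_MODULE,
		.name				= "myboard",
		.proc_name			= "myboard",
		/* fas216_queue_command now takes struct scsi_cmnd, matching
		 * the mid-layer's queuecommand prototype directly */
		.queuecommand			= fas216_queue_command,
		.eh_abort_handler		= fas216_eh_abort,
		.eh_device_reset_handler	= fas216_eh_device_reset,
		.eh_bus_reset_handler		= fas216_eh_bus_reset,
		.eh_host_reset_handler		= fas216_eh_host_reset,
		.can_queue			= 1,
		.this_id			= 7,
		.sg_tablesize			= SG_ALL,
		.cmd_per_lun			= 1,
		.use_clustering			= DISABLE_CLUSTERING,
	};

Boards without a usable interrupt line would point .queuecommand at fas216_noqueue_command instead, which is why both variants are exported.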
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index 8caa5903ce3..cb11ccef54e 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -29,7 +29,7 @@
29 29
30typedef struct queue_entry { 30typedef struct queue_entry {
31 struct list_head list; 31 struct list_head list;
32 Scsi_Cmnd *SCpnt; 32 struct scsi_cmnd *SCpnt;
33#ifdef DEBUG 33#ifdef DEBUG
34 unsigned long magic; 34 unsigned long magic;
35#endif 35#endif
@@ -96,14 +96,14 @@ void queue_free (Queue_t *queue)
96 96
97 97
98/* 98/*
99 * Function: int queue_add_cmd(Queue_t *queue, Scsi_Cmnd *SCpnt, int head) 99 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
100 * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head. 100 * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head.
101 * Params : queue - destination queue 101 * Params : queue - destination queue
102 * SCpnt - command to add 102 * SCpnt - command to add
103 * head - add command to head of queue 103 * head - add command to head of queue
104 * Returns : 0 on error, !0 on success 104 * Returns : 0 on error, !0 on success
105 */ 105 */
106int __queue_add(Queue_t *queue, Scsi_Cmnd *SCpnt, int head) 106int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
107{ 107{
108 unsigned long flags; 108 unsigned long flags;
109 struct list_head *l; 109 struct list_head *l;
@@ -134,7 +134,7 @@ empty:
134 return ret; 134 return ret;
135} 135}
136 136
137static Scsi_Cmnd *__queue_remove(Queue_t *queue, struct list_head *ent) 137static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
138{ 138{
139 QE_t *q; 139 QE_t *q;
140 140
@@ -152,17 +152,17 @@ static Scsi_Cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
152} 152}
153 153
154/* 154/*
155 * Function: Scsi_Cmnd *queue_remove_exclude (queue, exclude) 155 * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude)
156 * Purpose : remove a SCSI command from a queue 156 * Purpose : remove a SCSI command from a queue
157 * Params : queue - queue to remove command from 157 * Params : queue - queue to remove command from
158 * exclude - bit array of target&lun which is busy 158 * exclude - bit array of target&lun which is busy
159 * Returns : Scsi_Cmnd if successful (and a reference), or NULL if no command available 159 * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
160 */ 160 */
161Scsi_Cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude) 161struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
162{ 162{
163 unsigned long flags; 163 unsigned long flags;
164 struct list_head *l; 164 struct list_head *l;
165 Scsi_Cmnd *SCpnt = NULL; 165 struct scsi_cmnd *SCpnt = NULL;
166 166
167 spin_lock_irqsave(&queue->queue_lock, flags); 167 spin_lock_irqsave(&queue->queue_lock, flags);
168 list_for_each(l, &queue->head) { 168 list_for_each(l, &queue->head) {
@@ -178,15 +178,15 @@ Scsi_Cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
178} 178}
179 179
180/* 180/*
181 * Function: Scsi_Cmnd *queue_remove (queue) 181 * Function: struct scsi_cmnd *queue_remove (queue)
182 * Purpose : removes first SCSI command from a queue 182 * Purpose : removes first SCSI command from a queue
183 * Params : queue - queue to remove command from 183 * Params : queue - queue to remove command from
184 * Returns : Scsi_Cmnd if successful (and a reference), or NULL if no command available 184 * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
185 */ 185 */
186Scsi_Cmnd *queue_remove(Queue_t *queue) 186struct scsi_cmnd *queue_remove(Queue_t *queue)
187{ 187{
188 unsigned long flags; 188 unsigned long flags;
189 Scsi_Cmnd *SCpnt = NULL; 189 struct scsi_cmnd *SCpnt = NULL;
190 190
191 spin_lock_irqsave(&queue->queue_lock, flags); 191 spin_lock_irqsave(&queue->queue_lock, flags);
192 if (!list_empty(&queue->head)) 192 if (!list_empty(&queue->head))
@@ -197,19 +197,20 @@ Scsi_Cmnd *queue_remove(Queue_t *queue)
197} 197}
198 198
199/* 199/*
200 * Function: Scsi_Cmnd *queue_remove_tgtluntag (queue, target, lun, tag) 200 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
201 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag 201 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
202 * Params : queue - queue to remove command from 202 * Params : queue - queue to remove command from
203 * target - target that we want 203 * target - target that we want
204 * lun - lun on device 204 * lun - lun on device
205 * tag - tag on device 205 * tag - tag on device
206 * Returns : Scsi_Cmnd if successful, or NULL if no command satisfies requirements 206 * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements
207 */ 207 */
208Scsi_Cmnd *queue_remove_tgtluntag (Queue_t *queue, int target, int lun, int tag) 208struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
209 int tag)
209{ 210{
210 unsigned long flags; 211 unsigned long flags;
211 struct list_head *l; 212 struct list_head *l;
212 Scsi_Cmnd *SCpnt = NULL; 213 struct scsi_cmnd *SCpnt = NULL;
213 214
214 spin_lock_irqsave(&queue->queue_lock, flags); 215 spin_lock_irqsave(&queue->queue_lock, flags);
215 list_for_each(l, &queue->head) { 216 list_for_each(l, &queue->head) {
@@ -275,13 +276,13 @@ int queue_probetgtlun (Queue_t *queue, int target, int lun)
275} 276}
276 277
277/* 278/*
278 * Function: int queue_remove_cmd(Queue_t *queue, Scsi_Cmnd *SCpnt) 279 * Function: int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
279 * Purpose : remove a specific command from the queues 280 * Purpose : remove a specific command from the queues
280 * Params : queue - queue to look in 281 * Params : queue - queue to look in
281 * SCpnt - command to find 282 * SCpnt - command to find
282 * Returns : 0 if not found 283 * Returns : 0 if not found
283 */ 284 */
284int queue_remove_cmd(Queue_t *queue, Scsi_Cmnd *SCpnt) 285int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
285{ 286{
286 unsigned long flags; 287 unsigned long flags;
287 struct list_head *l; 288 struct list_head *l;
diff --git a/drivers/scsi/arm/queue.h b/drivers/scsi/arm/queue.h
index 0c9dec4c171..3c519c9237b 100644
--- a/drivers/scsi/arm/queue.h
+++ b/drivers/scsi/arm/queue.h
@@ -32,46 +32,48 @@ extern int queue_initialise (Queue_t *queue);
32extern void queue_free (Queue_t *queue); 32extern void queue_free (Queue_t *queue);
33 33
34/* 34/*
35 * Function: Scsi_Cmnd *queue_remove (queue) 35 * Function: struct scsi_cmnd *queue_remove (queue)
36 * Purpose : removes first SCSI command from a queue 36 * Purpose : removes first SCSI command from a queue
37 * Params : queue - queue to remove command from 37 * Params : queue - queue to remove command from
38 * Returns : Scsi_Cmnd if successful (and a reference), or NULL if no command available 38 * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
39 */ 39 */
40extern Scsi_Cmnd *queue_remove (Queue_t *queue); 40extern struct scsi_cmnd *queue_remove (Queue_t *queue);
41 41
42/* 42/*
 43 * Function: Scsi_Cmnd *queue_remove_exclude_ref (queue, exclude) 43 * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude)
44 * Purpose : remove a SCSI command from a queue 44 * Purpose : remove a SCSI command from a queue
45 * Params : queue - queue to remove command from 45 * Params : queue - queue to remove command from
46 * exclude - array of busy LUNs 46 * exclude - array of busy LUNs
47 * Returns : Scsi_Cmnd if successful (and a reference), or NULL if no command available 47 * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
48 */ 48 */
49extern Scsi_Cmnd *queue_remove_exclude (Queue_t *queue, unsigned long *exclude); 49extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
50 unsigned long *exclude);
50 51
51#define queue_add_cmd_ordered(queue,SCpnt) \ 52#define queue_add_cmd_ordered(queue,SCpnt) \
52 __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE) 53 __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
53#define queue_add_cmd_tail(queue,SCpnt) \ 54#define queue_add_cmd_tail(queue,SCpnt) \
54 __queue_add(queue,SCpnt,0) 55 __queue_add(queue,SCpnt,0)
55/* 56/*
56 * Function: int __queue_add(Queue_t *queue, Scsi_Cmnd *SCpnt, int head) 57 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
57 * Purpose : Add a new command onto a queue 58 * Purpose : Add a new command onto a queue
58 * Params : queue - destination queue 59 * Params : queue - destination queue
59 * SCpnt - command to add 60 * SCpnt - command to add
60 * head - add command to head of queue 61 * head - add command to head of queue
61 * Returns : 0 on error, !0 on success 62 * Returns : 0 on error, !0 on success
62 */ 63 */
63extern int __queue_add(Queue_t *queue, Scsi_Cmnd *SCpnt, int head); 64extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
64 65
65/* 66/*
66 * Function: Scsi_Cmnd *queue_remove_tgtluntag (queue, target, lun, tag) 67 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
67 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag 68 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
68 * Params : queue - queue to remove command from 69 * Params : queue - queue to remove command from
69 * target - target that we want 70 * target - target that we want
70 * lun - lun on device 71 * lun - lun on device
71 * tag - tag on device 72 * tag - tag on device
72 * Returns : Scsi_Cmnd if successful, or NULL if no command satisfies requirements 73 * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements
73 */ 74 */
74extern Scsi_Cmnd *queue_remove_tgtluntag (Queue_t *queue, int target, int lun, int tag); 75extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
76 int lun, int tag);
75 77
76/* 78/*
77 * Function: queue_remove_all_target(queue, target) 79 * Function: queue_remove_all_target(queue, target)
@@ -94,12 +96,12 @@ extern void queue_remove_all_target(Queue_t *queue, int target);
94extern int queue_probetgtlun (Queue_t *queue, int target, int lun); 96extern int queue_probetgtlun (Queue_t *queue, int target, int lun);
95 97
96/* 98/*
97 * Function: int queue_remove_cmd (Queue_t *queue, Scsi_Cmnd *SCpnt) 99 * Function: int queue_remove_cmd (Queue_t *queue, struct scsi_cmnd *SCpnt)
98 * Purpose : remove a specific command from the queues 100 * Purpose : remove a specific command from the queues
99 * Params : queue - queue to look in 101 * Params : queue - queue to look in
100 * SCpnt - command to find 102 * SCpnt - command to find
101 * Returns : 0 if not found 103 * Returns : 0 if not found
102 */ 104 */
103int queue_remove_cmd(Queue_t *queue, Scsi_Cmnd *SCpnt); 105int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt);
104 106
105#endif /* QUEUE_H */ 107#endif /* QUEUE_H */
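
The queue API above is the small per-host command queue shared by these ARM drivers. A short, hedged usage sketch of the add/remove primitives (not from this patch; the mydrv_* helpers are hypothetical):

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include "queue.h"

	static Queue_t issue_queue;		/* set up with queue_initialise() */

	static int mydrv_queue(struct scsi_cmnd *SCpnt)
	{
		/* REQUEST_SENSE commands are added at the head of the queue */
		if (!queue_add_cmd_ordered(&issue_queue, SCpnt))
			return -ENOMEM;		/* __queue_add() returns 0 on failure */
		return 0;
	}

	static struct scsi_cmnd *mydrv_next_command(unsigned long *busyluns)
	{
		/* skip any target/LUN currently marked busy in the bitmap */
		return queue_remove_exclude(&issue_queue, busyluns);
	}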
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 8c2600ffc6a..3a39579bd08 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -66,7 +66,7 @@ static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
66 SCp->this_residual -= 1; 66 SCp->this_residual -= 1;
67} 67}
68 68
69static inline void init_SCp(Scsi_Cmnd *SCpnt) 69static inline void init_SCp(struct scsi_cmnd *SCpnt)
70{ 70{
71 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer)); 71 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
72 72
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index ff2b1796fa3..c6118d99385 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1219,7 +1219,7 @@ static void dump_register_info(struct AdapterCtlBlk *acb,
1219 srb, srb->cmd, srb->cmd->pid, 1219 srb, srb->cmd, srb->cmd->pid,
1220 srb->cmd->cmnd[0], srb->cmd->device->id, 1220 srb->cmd->cmnd[0], srb->cmd->device->id,
1221 srb->cmd->device->lun); 1221 srb->cmd->device->lun);
1222 printk(" sglist=%p cnt=%i idx=%i len=%i\n", 1222 printk(" sglist=%p cnt=%i idx=%i len=%Zd\n",
1223 srb->segment_x, srb->sg_count, srb->sg_index, 1223 srb->segment_x, srb->sg_count, srb->sg_index,
1224 srb->total_xfer_length); 1224 srb->total_xfer_length);
1225 printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n", 1225 printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
@@ -4949,7 +4949,7 @@ static struct pci_driver dc395x_driver = {
4949 **/ 4949 **/
4950static int __init dc395x_module_init(void) 4950static int __init dc395x_module_init(void)
4951{ 4951{
4952 return pci_module_init(&dc395x_driver); 4952 return pci_register_driver(&dc395x_driver);
4953} 4953}
4954 4954
4955 4955
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 879a2665767..fa738ec8692 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -155,7 +155,7 @@ static struct pci_driver dmx3191d_pci_driver = {
155 155
156static int __init dmx3191d_init(void) 156static int __init dmx3191d_init(void)
157{ 157{
158 return pci_module_init(&dmx3191d_pci_driver); 158 return pci_register_driver(&dmx3191d_pci_driver);
159} 159}
160 160
161static void __exit dmx3191d_exit(void) 161static void __exit dmx3191d_exit(void)
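
This hunk, like the dc395x one above, replaces the deprecated pci_module_init() wrapper with pci_register_driver(). The usual shape of the conversion, sketched here with placeholder example_* names and IDs (not a driver touched by this patch), pairs it with pci_unregister_driver() at module exit:

	#include <linux/module.h>
	#include <linux/pci.h>

	static struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder IDs */
		{ }
	};

	static int __devinit example_probe(struct pci_dev *pdev,
					   const struct pci_device_id *id)
	{
		return 0;				/* real probing elided */
	}

	static void __devexit example_remove(struct pci_dev *pdev)
	{
	}

	static struct pci_driver example_driver = {
		.name		= "example",
		.id_table	= example_pci_tbl,
		.probe		= example_probe,
		.remove		= example_remove,
	};

	static int __init example_init(void)
	{
		/* pci_register_driver() returns 0 or a negative errno */
		return pci_register_driver(&example_driver);
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&example_driver);
	}

	module_init(example_init);
	module_exit(example_exit);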
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index d84a281ad94..b3fa7ed71fa 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -47,21 +47,11 @@
47 * I2O Interface Objects 47 * I2O Interface Objects
48 */ 48 */
49 49
50#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
51
52#define DECLARE_MUTEX(name) struct semaphore name=MUTEX
53
54typedef struct wait_queue *adpt_wait_queue_head_t;
55#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) adpt_wait_queue_head_t wait = NULL
56typedef struct wait_queue adpt_wait_queue_t;
57#else
58
59#include <linux/wait.h> 50#include <linux/wait.h>
60typedef wait_queue_head_t adpt_wait_queue_head_t; 51typedef wait_queue_head_t adpt_wait_queue_head_t;
61#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD(wait) 52#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD(wait)
62typedef wait_queue_t adpt_wait_queue_t; 53typedef wait_queue_t adpt_wait_queue_t;
63 54
64#endif
65/* 55/*
66 * message structures 56 * message structures
67 */ 57 */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7b3bd34faf4..b20b37661d6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2212,7 +2212,7 @@ static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2212 */ 2212 */
2213 host->io_port = 0; 2213 host->io_port = 0;
2214 host->n_io_port = 0; 2214 host->n_io_port = 0;
2215 /* see comments in hosts.h */ 2215 /* see comments in scsi_host.h */
2216 host->max_id = 16; 2216 host->max_id = 16;
2217 host->max_lun = 256; 2217 host->max_lun = 256;
2218 host->max_channel = pHba->top_scsi_channel + 1; 2218 host->max_channel = pHba->top_scsi_channel + 1;
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 2ad2a89b5db..28998326492 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -44,7 +44,7 @@ static int adpt_device_reset(struct scsi_cmnd* cmd);
44 44
45 45
46/* 46/*
47 * struct scsi_host_template (see hosts.h) 47 * struct scsi_host_template (see scsi/scsi_host.h)
48 */ 48 */
49 49
50#define DPT_DRIVER_NAME "Adaptec I2O RAID" 50#define DPT_DRIVER_NAME "Adaptec I2O RAID"
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 47eae029975..8c29eafd51c 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -936,18 +936,12 @@ typedef struct {
936 gdth_binfo_str binfo; /* controller info */ 936 gdth_binfo_str binfo; /* controller info */
937 gdth_evt_data dvr; /* event structure */ 937 gdth_evt_data dvr; /* event structure */
938 spinlock_t smp_lock; 938 spinlock_t smp_lock;
939#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
940 struct pci_dev *pdev; 939 struct pci_dev *pdev;
941#endif
942 char oem_name[8]; 940 char oem_name[8];
943#ifdef GDTH_DMA_STATISTICS 941#ifdef GDTH_DMA_STATISTICS
944 ulong dma32_cnt, dma64_cnt; /* statistics: DMA buffer */ 942 ulong dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
945#endif 943#endif
946#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
947 struct scsi_device *sdev; 944 struct scsi_device *sdev;
948#else
949 struct scsi_device sdev;
950#endif
951} gdth_ha_str; 945} gdth_ha_str;
952 946
953/* structure for scsi_register(), SCSI bus != 0 */ 947/* structure for scsi_register(), SCSI bus != 0 */
@@ -1029,10 +1023,6 @@ typedef struct {
1029 1023
1030/* function prototyping */ 1024/* function prototyping */
1031 1025
1032#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
1033int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int); 1026int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int);
1034#else
1035int gdth_proc_info(char *,char **,off_t,int,int,int);
1036#endif
1037 1027
1038#endif 1028#endif
diff --git a/drivers/scsi/hosts.h b/drivers/scsi/hosts.h
deleted file mode 100644
index c27264bed5d..00000000000
--- a/drivers/scsi/hosts.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#warning "This file is obsolete, please use <scsi/scsi_host.h> instead"
2#include <scsi/scsi_host.h>
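
drivers/scsi/hosts.h was only a two-line compatibility stub that warned and redirected to the real header, so its removal simply means including that header directly (in-tree users were already converted; the dpt comments above were updated to match):

	/* before: via the compatibility stub (now removed) */
	#include "hosts.h"

	/* after: include the canonical header directly */
	#include <scsi/scsi_host.h>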
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 7ed4eef8347..e1fe9494125 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -70,6 +70,7 @@
70#include <linux/firmware.h> 70#include <linux/firmware.h>
71#include <linux/module.h> 71#include <linux/module.h>
72#include <linux/moduleparam.h> 72#include <linux/moduleparam.h>
73#include <linux/libata.h>
73#include <asm/io.h> 74#include <asm/io.h>
74#include <asm/irq.h> 75#include <asm/irq.h>
75#include <asm/processor.h> 76#include <asm/processor.h>
@@ -78,6 +79,7 @@
78#include <scsi/scsi_tcq.h> 79#include <scsi/scsi_tcq.h>
79#include <scsi/scsi_eh.h> 80#include <scsi/scsi_eh.h>
80#include <scsi/scsi_cmnd.h> 81#include <scsi/scsi_cmnd.h>
82#include <scsi/scsi_transport.h>
81#include "ipr.h" 83#include "ipr.h"
82 84
83/* 85/*
@@ -199,6 +201,8 @@ struct ipr_error_table_t ipr_error_table[] = {
199 "FFFA: Undefined device response recovered by the IOA"}, 201 "FFFA: Undefined device response recovered by the IOA"},
200 {0x014A0000, 1, 1, 202 {0x014A0000, 1, 1,
201 "FFF6: Device bus error, message or command phase"}, 203 "FFF6: Device bus error, message or command phase"},
204 {0x014A8000, 0, 1,
205 "FFFE: Task Management Function failed"},
202 {0x015D0000, 0, 1, 206 {0x015D0000, 0, 1,
203 "FFF6: Failure prediction threshold exceeded"}, 207 "FFF6: Failure prediction threshold exceeded"},
204 {0x015D9200, 0, 1, 208 {0x015D9200, 0, 1,
@@ -261,6 +265,8 @@ struct ipr_error_table_t ipr_error_table[] = {
261 "Device bus status error"}, 265 "Device bus status error"},
262 {0x04448600, 0, 1, 266 {0x04448600, 0, 1,
263 "8157: IOA error requiring IOA reset to recover"}, 267 "8157: IOA error requiring IOA reset to recover"},
268 {0x04448700, 0, 0,
269 "ATA device status error"},
264 {0x04490000, 0, 0, 270 {0x04490000, 0, 0,
265 "Message reject received from the device"}, 271 "Message reject received from the device"},
266 {0x04449200, 0, 1, 272 {0x04449200, 0, 1,
@@ -273,6 +279,8 @@ struct ipr_error_table_t ipr_error_table[] = {
273 "9082: IOA detected device error"}, 279 "9082: IOA detected device error"},
274 {0x044A0000, 1, 1, 280 {0x044A0000, 1, 1,
275 "3110: Device bus error, message or command phase"}, 281 "3110: Device bus error, message or command phase"},
282 {0x044A8000, 1, 1,
283 "3110: SAS Command / Task Management Function failed"},
276 {0x04670400, 0, 1, 284 {0x04670400, 0, 1,
277 "9091: Incorrect hardware configuration change has been detected"}, 285 "9091: Incorrect hardware configuration change has been detected"},
278 {0x04678000, 0, 1, 286 {0x04678000, 0, 1,
@@ -453,7 +461,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
453 trace_entry->time = jiffies; 461 trace_entry->time = jiffies;
454 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 462 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
455 trace_entry->type = type; 463 trace_entry->type = type;
456 trace_entry->cmd_index = ipr_cmd->cmd_index; 464 trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
465 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
457 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; 466 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
458 trace_entry->u.add_data = add_data; 467 trace_entry->u.add_data = add_data;
459} 468}
@@ -480,8 +489,10 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
480 ioarcb->read_ioadl_len = 0; 489 ioarcb->read_ioadl_len = 0;
481 ioasa->ioasc = 0; 490 ioasa->ioasc = 0;
482 ioasa->residual_data_len = 0; 491 ioasa->residual_data_len = 0;
492 ioasa->u.gata.status = 0;
483 493
484 ipr_cmd->scsi_cmd = NULL; 494 ipr_cmd->scsi_cmd = NULL;
495 ipr_cmd->qc = NULL;
485 ipr_cmd->sense_buffer[0] = 0; 496 ipr_cmd->sense_buffer[0] = 0;
486 ipr_cmd->dma_use_sg = 0; 497 ipr_cmd->dma_use_sg = 0;
487} 498}
@@ -626,6 +637,28 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
626} 637}
627 638
628/** 639/**
640 * ipr_sata_eh_done - done function for aborted SATA commands
641 * @ipr_cmd: ipr command struct
642 *
643 * This function is invoked for ops generated to SATA
644 * devices which are being aborted.
645 *
646 * Return value:
647 * none
648 **/
649static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
650{
651 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
652 struct ata_queued_cmd *qc = ipr_cmd->qc;
653 struct ipr_sata_port *sata_port = qc->ap->private_data;
654
655 qc->err_mask |= AC_ERR_OTHER;
656 sata_port->ioasa.status |= ATA_BUSY;
657 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
658 ata_qc_complete(qc);
659}
660
661/**
629 * ipr_scsi_eh_done - mid-layer done function for aborted ops 662 * ipr_scsi_eh_done - mid-layer done function for aborted ops
630 * @ipr_cmd: ipr command struct 663 * @ipr_cmd: ipr command struct
631 * 664 *
@@ -669,6 +702,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
669 702
670 if (ipr_cmd->scsi_cmd) 703 if (ipr_cmd->scsi_cmd)
671 ipr_cmd->done = ipr_scsi_eh_done; 704 ipr_cmd->done = ipr_scsi_eh_done;
705 else if (ipr_cmd->qc)
706 ipr_cmd->done = ipr_sata_eh_done;
672 707
673 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); 708 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
674 del_timer(&ipr_cmd->timer); 709 del_timer(&ipr_cmd->timer);
@@ -825,6 +860,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
825 res->del_from_ml = 0; 860 res->del_from_ml = 0;
826 res->resetting_device = 0; 861 res->resetting_device = 0;
827 res->sdev = NULL; 862 res->sdev = NULL;
863 res->sata_port = NULL;
828} 864}
829 865
830/** 866/**
@@ -1316,7 +1352,7 @@ static u32 ipr_get_error(u32 ioasc)
1316 int i; 1352 int i;
1317 1353
1318 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) 1354 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1319 if (ipr_error_table[i].ioasc == ioasc) 1355 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1320 return i; 1356 return i;
1321 1357
1322 return 0; 1358 return 0;
@@ -3051,6 +3087,17 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3051 **/ 3087 **/
3052static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 3088static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3053{ 3089{
3090 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3091 struct ipr_resource_entry *res;
3092 unsigned long lock_flags = 0;
3093
3094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3095 res = (struct ipr_resource_entry *)sdev->hostdata;
3096
3097 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3098 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3100
3054 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 3101 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3055 return sdev->queue_depth; 3102 return sdev->queue_depth;
3056} 3103}
@@ -3166,6 +3213,122 @@ static int ipr_biosparam(struct scsi_device *sdev,
3166} 3213}
3167 3214
3168/** 3215/**
3216 * ipr_find_starget - Find target based on bus/target.
3217 * @starget: scsi target struct
3218 *
3219 * Return value:
3220 * resource entry pointer if found / NULL if not found
3221 **/
3222static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3223{
3224 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3225 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3226 struct ipr_resource_entry *res;
3227
3228 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3229 if ((res->cfgte.res_addr.bus == starget->channel) &&
3230 (res->cfgte.res_addr.target == starget->id) &&
3231 (res->cfgte.res_addr.lun == 0)) {
3232 return res;
3233 }
3234 }
3235
3236 return NULL;
3237}
3238
3239static struct ata_port_info sata_port_info;
3240
3241/**
3242 * ipr_target_alloc - Prepare for commands to a SCSI target
3243 * @starget: scsi target struct
3244 *
3245 * If the device is a SATA device, this function allocates an
3246 * ATA port with libata, else it does nothing.
3247 *
3248 * Return value:
3249 * 0 on success / non-0 on failure
3250 **/
3251static int ipr_target_alloc(struct scsi_target *starget)
3252{
3253 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3254 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3255 struct ipr_sata_port *sata_port;
3256 struct ata_port *ap;
3257 struct ipr_resource_entry *res;
3258 unsigned long lock_flags;
3259
3260 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3261 res = ipr_find_starget(starget);
3262 starget->hostdata = NULL;
3263
3264 if (res && ipr_is_gata(res)) {
3265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3266 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3267 if (!sata_port)
3268 return -ENOMEM;
3269
3270 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3271 if (ap) {
3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273 sata_port->ioa_cfg = ioa_cfg;
3274 sata_port->ap = ap;
3275 sata_port->res = res;
3276
3277 res->sata_port = sata_port;
3278 ap->private_data = sata_port;
3279 starget->hostdata = sata_port;
3280 } else {
3281 kfree(sata_port);
3282 return -ENOMEM;
3283 }
3284 }
3285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3286
3287 return 0;
3288}
3289
3290/**
3291 * ipr_target_destroy - Destroy a SCSI target
3292 * @starget: scsi target struct
3293 *
3294 * If the device was a SATA device, this function frees the libata
3295 * ATA port, else it does nothing.
3296 *
3297 **/
3298static void ipr_target_destroy(struct scsi_target *starget)
3299{
3300 struct ipr_sata_port *sata_port = starget->hostdata;
3301
3302 if (sata_port) {
3303 starget->hostdata = NULL;
3304 ata_sas_port_destroy(sata_port->ap);
3305 kfree(sata_port);
3306 }
3307}
3308
3309/**
3310 * ipr_find_sdev - Find device based on bus/target/lun.
3311 * @sdev: scsi device struct
3312 *
3313 * Return value:
3314 * resource entry pointer if found / NULL if not found
3315 **/
3316static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3317{
3318 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3319 struct ipr_resource_entry *res;
3320
3321 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3322 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3323 (res->cfgte.res_addr.target == sdev->id) &&
3324 (res->cfgte.res_addr.lun == sdev->lun))
3325 return res;
3326 }
3327
3328 return NULL;
3329}
3330
3331/**
3169 * ipr_slave_destroy - Unconfigure a SCSI device 3332 * ipr_slave_destroy - Unconfigure a SCSI device
3170 * @sdev: scsi device struct 3333 * @sdev: scsi device struct
3171 * 3334 *
@@ -3183,8 +3346,11 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
3183 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3346 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3184 res = (struct ipr_resource_entry *) sdev->hostdata; 3347 res = (struct ipr_resource_entry *) sdev->hostdata;
3185 if (res) { 3348 if (res) {
3349 if (res->sata_port)
3350 ata_port_disable(res->sata_port->ap);
3186 sdev->hostdata = NULL; 3351 sdev->hostdata = NULL;
3187 res->sdev = NULL; 3352 res->sdev = NULL;
3353 res->sata_port = NULL;
3188 } 3354 }
3189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190} 3356}
@@ -3219,13 +3385,45 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3219 } 3385 }
3220 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) 3386 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3221 sdev->allow_restart = 1; 3387 sdev->allow_restart = 1;
3222 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 3388 if (ipr_is_gata(res) && res->sata_port) {
3389 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3390 ata_sas_slave_configure(sdev, res->sata_port->ap);
3391 } else {
3392 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3393 }
3223 } 3394 }
3224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3225 return 0; 3396 return 0;
3226} 3397}
3227 3398
3228/** 3399/**
3400 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3401 * @sdev: scsi device struct
3402 *
3403 * This function initializes an ATA port so that future commands
3404 * sent through queuecommand will work.
3405 *
3406 * Return value:
3407 * 0 on success
3408 **/
3409static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3410{
3411 struct ipr_sata_port *sata_port = NULL;
3412 int rc = -ENXIO;
3413
3414 ENTER;
3415 if (sdev->sdev_target)
3416 sata_port = sdev->sdev_target->hostdata;
3417 if (sata_port)
3418 rc = ata_sas_port_init(sata_port->ap);
3419 if (rc)
3420 ipr_slave_destroy(sdev);
3421
3422 LEAVE;
3423 return rc;
3424}
3425
3426/**
3229 * ipr_slave_alloc - Prepare for commands to a device. 3427 * ipr_slave_alloc - Prepare for commands to a device.
3230 * @sdev: scsi device struct 3428 * @sdev: scsi device struct
3231 * 3429 *
@@ -3248,18 +3446,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
3248 3446
3249 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3447 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3250 3448
3251 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3449 res = ipr_find_sdev(sdev);
3252 if ((res->cfgte.res_addr.bus == sdev->channel) && 3450 if (res) {
3253 (res->cfgte.res_addr.target == sdev->id) && 3451 res->sdev = sdev;
3254 (res->cfgte.res_addr.lun == sdev->lun)) { 3452 res->add_to_ml = 0;
3255 res->sdev = sdev; 3453 res->in_erp = 0;
3256 res->add_to_ml = 0; 3454 sdev->hostdata = res;
3257 res->in_erp = 0; 3455 if (!ipr_is_naca_model(res))
3258 sdev->hostdata = res; 3456 res->needs_sync_complete = 1;
3259 if (!ipr_is_naca_model(res)) 3457 rc = 0;
3260 res->needs_sync_complete = 1; 3458 if (ipr_is_gata(res)) {
3261 rc = 0; 3459 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262 break; 3460 return ipr_ata_slave_alloc(sdev);
3263 } 3461 }
3264 } 3462 }
3265 3463
@@ -3314,7 +3512,8 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3314 * This function issues a device reset to the affected device. 3512 * This function issues a device reset to the affected device.
3315 * If the device is a SCSI device, a LUN reset will be sent 3513 * If the device is a SCSI device, a LUN reset will be sent
3316 * to the device first. If that does not work, a target reset 3514 * to the device first. If that does not work, a target reset
3317 * will be sent. 3515 * will be sent. If the device is a SATA device, a PHY reset will
3516 * be sent.
3318 * 3517 *
3319 * Return value: 3518 * Return value:
3320 * 0 on success / non-zero on failure 3519 * 0 on success / non-zero on failure
@@ -3325,26 +3524,79 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3325 struct ipr_cmnd *ipr_cmd; 3524 struct ipr_cmnd *ipr_cmd;
3326 struct ipr_ioarcb *ioarcb; 3525 struct ipr_ioarcb *ioarcb;
3327 struct ipr_cmd_pkt *cmd_pkt; 3526 struct ipr_cmd_pkt *cmd_pkt;
3527 struct ipr_ioarcb_ata_regs *regs;
3328 u32 ioasc; 3528 u32 ioasc;
3329 3529
3330 ENTER; 3530 ENTER;
3331 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 3531 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3332 ioarcb = &ipr_cmd->ioarcb; 3532 ioarcb = &ipr_cmd->ioarcb;
3333 cmd_pkt = &ioarcb->cmd_pkt; 3533 cmd_pkt = &ioarcb->cmd_pkt;
3534 regs = &ioarcb->add_data.u.regs;
3334 3535
3335 ioarcb->res_handle = res->cfgte.res_handle; 3536 ioarcb->res_handle = res->cfgte.res_handle;
3336 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 3537 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3337 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 3538 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3539 if (ipr_is_gata(res)) {
3540 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3541 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3542 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3543 }
3338 3544
3339 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 3545 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3340 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 3546 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3341 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 3547 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3548 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3549 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3550 sizeof(struct ipr_ioasa_gata));
3342 3551
3343 LEAVE; 3552 LEAVE;
3344 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); 3553 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3345} 3554}
3346 3555
3347/** 3556/**
3557 * ipr_sata_reset - Reset the SATA port
3558 * @ap: SATA port to reset
3559 * @classes: class of the attached device
3560 *
3561 * This function issues a SATA phy reset to the affected ATA port.
3562 *
3563 * Return value:
3564 * 0 on success / non-zero on failure
3565 **/
3566static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
3567{
3568 struct ipr_sata_port *sata_port = ap->private_data;
3569 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3570 struct ipr_resource_entry *res;
3571 unsigned long lock_flags = 0;
3572 int rc = -ENXIO;
3573
3574 ENTER;
3575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3576 res = sata_port->res;
3577 if (res) {
3578 rc = ipr_device_reset(ioa_cfg, res);
3579 switch(res->cfgte.proto) {
3580 case IPR_PROTO_SATA:
3581 case IPR_PROTO_SAS_STP:
3582 *classes = ATA_DEV_ATA;
3583 break;
3584 case IPR_PROTO_SATA_ATAPI:
3585 case IPR_PROTO_SAS_STP_ATAPI:
3586 *classes = ATA_DEV_ATAPI;
3587 break;
3588 default:
3589 *classes = ATA_DEV_UNKNOWN;
3590 break;
3591 };
3592 }
3593
3594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595 LEAVE;
3596 return rc;
3597}
3598
3599/**
3348 * ipr_eh_dev_reset - Reset the device 3600 * ipr_eh_dev_reset - Reset the device
3349 * @scsi_cmd: scsi command struct 3601 * @scsi_cmd: scsi command struct
3350 * 3602 *
@@ -3360,7 +3612,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3360 struct ipr_cmnd *ipr_cmd; 3612 struct ipr_cmnd *ipr_cmd;
3361 struct ipr_ioa_cfg *ioa_cfg; 3613 struct ipr_ioa_cfg *ioa_cfg;
3362 struct ipr_resource_entry *res; 3614 struct ipr_resource_entry *res;
3363 int rc; 3615 struct ata_port *ap;
3616 int rc = 0;
3364 3617
3365 ENTER; 3618 ENTER;
3366 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 3619 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -3388,7 +3641,14 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3388 3641
3389 res->resetting_device = 1; 3642 res->resetting_device = 1;
3390 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); 3643 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3391 rc = ipr_device_reset(ioa_cfg, res); 3644
3645 if (ipr_is_gata(res) && res->sata_port) {
3646 ap = res->sata_port->ap;
3647 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3648 ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3649 spin_lock_irq(scsi_cmd->device->host->host_lock);
3650 } else
3651 rc = ipr_device_reset(ioa_cfg, res);
3392 res->resetting_device = 0; 3652 res->resetting_device = 0;
3393 3653
3394 LEAVE; 3654 LEAVE;
@@ -4300,6 +4560,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4300 return 0; 4560 return 0;
4301 } 4561 }
4302 4562
4563 if (ipr_is_gata(res) && res->sata_port)
4564 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4565
4303 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4566 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4304 ioarcb = &ipr_cmd->ioarcb; 4567 ioarcb = &ipr_cmd->ioarcb;
4305 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 4568 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
@@ -4345,6 +4608,26 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4345} 4608}
4346 4609
4347/** 4610/**
4611 * ipr_ioctl - IOCTL handler
4612 * @sdev: scsi device struct
4613 * @cmd: IOCTL cmd
4614 * @arg: IOCTL arg
4615 *
4616 * Return value:
4617 * 0 on success / other on failure
4618 **/
4619int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4620{
4621 struct ipr_resource_entry *res;
4622
4623 res = (struct ipr_resource_entry *)sdev->hostdata;
4624 if (res && ipr_is_gata(res))
4625 return ata_scsi_ioctl(sdev, cmd, arg);
4626
4627 return -EINVAL;
4628}
4629
4630/**
4348 * ipr_info - Get information about the card/driver 4631 * ipr_info - Get information about the card/driver
4349 * @scsi_host: scsi host struct 4632 * @scsi_host: scsi host struct
4350 * 4633 *
@@ -4366,10 +4649,45 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
4366 return buffer; 4649 return buffer;
4367} 4650}
4368 4651
4652/**
4653 * ipr_scsi_timed_out - Handle scsi command timeout
4654 * @scsi_cmd: scsi command struct
4655 *
4656 * Return value:
4657 * EH_NOT_HANDLED
4658 **/
4659enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
4660{
4661 struct ipr_ioa_cfg *ioa_cfg;
4662 struct ipr_cmnd *ipr_cmd;
4663 unsigned long flags;
4664
4665 ENTER;
4666 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4667 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4668
4669 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4670 if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
4671 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4672 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4673 break;
4674 }
4675 }
4676
4677 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4678 LEAVE;
4679 return EH_NOT_HANDLED;
4680}
4681
4682static struct scsi_transport_template ipr_transport_template = {
4683 .eh_timed_out = ipr_scsi_timed_out
4684};
4685
4369static struct scsi_host_template driver_template = { 4686static struct scsi_host_template driver_template = {
4370 .module = THIS_MODULE, 4687 .module = THIS_MODULE,
4371 .name = "IPR", 4688 .name = "IPR",
4372 .info = ipr_ioa_info, 4689 .info = ipr_ioa_info,
4690 .ioctl = ipr_ioctl,
4373 .queuecommand = ipr_queuecommand, 4691 .queuecommand = ipr_queuecommand,
4374 .eh_abort_handler = ipr_eh_abort, 4692 .eh_abort_handler = ipr_eh_abort,
4375 .eh_device_reset_handler = ipr_eh_dev_reset, 4693 .eh_device_reset_handler = ipr_eh_dev_reset,
@@ -4377,6 +4695,8 @@ static struct scsi_host_template driver_template = {
4377 .slave_alloc = ipr_slave_alloc, 4695 .slave_alloc = ipr_slave_alloc,
4378 .slave_configure = ipr_slave_configure, 4696 .slave_configure = ipr_slave_configure,
4379 .slave_destroy = ipr_slave_destroy, 4697 .slave_destroy = ipr_slave_destroy,
4698 .target_alloc = ipr_target_alloc,
4699 .target_destroy = ipr_target_destroy,
4380 .change_queue_depth = ipr_change_queue_depth, 4700 .change_queue_depth = ipr_change_queue_depth,
4381 .change_queue_type = ipr_change_queue_type, 4701 .change_queue_type = ipr_change_queue_type,
4382 .bios_param = ipr_biosparam, 4702 .bios_param = ipr_biosparam,
@@ -4391,6 +4711,330 @@ static struct scsi_host_template driver_template = {
4391 .proc_name = IPR_NAME 4711 .proc_name = IPR_NAME
4392}; 4712};
4393 4713
4714/**
4715 * ipr_ata_phy_reset - libata phy_reset handler
4716 * @ap: ata port to reset
4717 *
4718 **/
4719static void ipr_ata_phy_reset(struct ata_port *ap)
4720{
4721 unsigned long flags;
4722 struct ipr_sata_port *sata_port = ap->private_data;
4723 struct ipr_resource_entry *res = sata_port->res;
4724 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4725 int rc;
4726
4727 ENTER;
4728 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4729 while(ioa_cfg->in_reset_reload) {
4730 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4731 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4732 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4733 }
4734
4735 if (!ioa_cfg->allow_cmds)
4736 goto out_unlock;
4737
4738 rc = ipr_device_reset(ioa_cfg, res);
4739
4740 if (rc) {
4741 ap->ops->port_disable(ap);
4742 goto out_unlock;
4743 }
4744
4745 switch(res->cfgte.proto) {
4746 case IPR_PROTO_SATA:
4747 case IPR_PROTO_SAS_STP:
4748 ap->device[0].class = ATA_DEV_ATA;
4749 break;
4750 case IPR_PROTO_SATA_ATAPI:
4751 case IPR_PROTO_SAS_STP_ATAPI:
4752 ap->device[0].class = ATA_DEV_ATAPI;
4753 break;
4754 default:
4755 ap->device[0].class = ATA_DEV_UNKNOWN;
4756 ap->ops->port_disable(ap);
4757 break;
4758 };
4759
4760out_unlock:
4761 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4762 LEAVE;
4763}
4764
4765/**
4766 * ipr_ata_post_internal - Cleanup after an internal command
4767 * @qc: ATA queued command
4768 *
4769 * Return value:
4770 * none
4771 **/
4772static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4773{
4774 struct ipr_sata_port *sata_port = qc->ap->private_data;
4775 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4776 struct ipr_cmnd *ipr_cmd;
4777 unsigned long flags;
4778
4779 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4780 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4781 if (ipr_cmd->qc == qc) {
4782 ipr_device_reset(ioa_cfg, sata_port->res);
4783 break;
4784 }
4785 }
4786 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4787}
4788
4789/**
4790 * ipr_tf_read - Read the current ATA taskfile for the ATA port
4791 * @ap: ATA port
4792 * @tf: destination ATA taskfile
4793 *
4794 * Return value:
4795 * none
4796 **/
4797static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
4798{
4799 struct ipr_sata_port *sata_port = ap->private_data;
4800 struct ipr_ioasa_gata *g = &sata_port->ioasa;
4801
4802 tf->feature = g->error;
4803 tf->nsect = g->nsect;
4804 tf->lbal = g->lbal;
4805 tf->lbam = g->lbam;
4806 tf->lbah = g->lbah;
4807 tf->device = g->device;
4808 tf->command = g->status;
4809 tf->hob_nsect = g->hob_nsect;
4810 tf->hob_lbal = g->hob_lbal;
4811 tf->hob_lbam = g->hob_lbam;
4812 tf->hob_lbah = g->hob_lbah;
4813 tf->ctl = g->alt_status;
4814}
4815
4816/**
4817 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
4818 * @regs: destination
4819 * @tf: source ATA taskfile
4820 *
4821 * Return value:
4822 * none
4823 **/
4824static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
4825 struct ata_taskfile *tf)
4826{
4827 regs->feature = tf->feature;
4828 regs->nsect = tf->nsect;
4829 regs->lbal = tf->lbal;
4830 regs->lbam = tf->lbam;
4831 regs->lbah = tf->lbah;
4832 regs->device = tf->device;
4833 regs->command = tf->command;
4834 regs->hob_feature = tf->hob_feature;
4835 regs->hob_nsect = tf->hob_nsect;
4836 regs->hob_lbal = tf->hob_lbal;
4837 regs->hob_lbam = tf->hob_lbam;
4838 regs->hob_lbah = tf->hob_lbah;
4839 regs->ctl = tf->ctl;
4840}
4841
4842/**
4843 * ipr_sata_done - done function for SATA commands
4844 * @ipr_cmd: ipr command struct
4845 *
4846 * This function is invoked by the interrupt handler for
4847 * ops generated by the SCSI mid-layer to SATA devices
4848 *
4849 * Return value:
4850 * none
4851 **/
4852static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
4853{
4854 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4855 struct ata_queued_cmd *qc = ipr_cmd->qc;
4856 struct ipr_sata_port *sata_port = qc->ap->private_data;
4857 struct ipr_resource_entry *res = sata_port->res;
4858 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4859
4860 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4861 sizeof(struct ipr_ioasa_gata));
4862 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4863
4864 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
4865 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
4866 res->cfgte.res_addr.target);
4867
4868 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4869 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
4870 else
4871 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
4872 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4873 ata_qc_complete(qc);
4874}
4875
4876/**
4877 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
4878 * @ipr_cmd: ipr command struct
4879 * @qc: ATA queued command
4880 *
4881 **/
4882static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
4883 struct ata_queued_cmd *qc)
4884{
4885 u32 ioadl_flags = 0;
4886 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4887 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4888 int len = qc->nbytes + qc->pad_len;
4889 struct scatterlist *sg;
4890
4891 if (len == 0)
4892 return;
4893
4894 if (qc->dma_dir == DMA_TO_DEVICE) {
4895 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4896 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4897 ioarcb->write_data_transfer_length = cpu_to_be32(len);
4898 ioarcb->write_ioadl_len =
4899 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4900 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
4901 ioadl_flags = IPR_IOADL_FLAGS_READ;
4902 ioarcb->read_data_transfer_length = cpu_to_be32(len);
4903 ioarcb->read_ioadl_len =
4904 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4905 }
4906
4907 ata_for_each_sg(sg, qc) {
4908 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4909 ioadl->address = cpu_to_be32(sg_dma_address(sg));
4910 if (ata_sg_is_last(sg, qc))
4911 ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4912 else
4913 ioadl++;
4914 }
4915}
4916
4917/**
4918 * ipr_qc_issue - Issue a SATA qc to a device
4919 * @qc: queued command
4920 *
4921 * Return value:
4922 * 0 if success
4923 **/
4924static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
4925{
4926 struct ata_port *ap = qc->ap;
4927 struct ipr_sata_port *sata_port = ap->private_data;
4928 struct ipr_resource_entry *res = sata_port->res;
4929 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4930 struct ipr_cmnd *ipr_cmd;
4931 struct ipr_ioarcb *ioarcb;
4932 struct ipr_ioarcb_ata_regs *regs;
4933
4934 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
4935 return -EIO;
4936
4937 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4938 ioarcb = &ipr_cmd->ioarcb;
4939 regs = &ioarcb->add_data.u.regs;
4940
4941 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
4942 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
4943
4944 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4945 ipr_cmd->qc = qc;
4946 ipr_cmd->done = ipr_sata_done;
4947 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4948 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
4949 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4950 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4951 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
4952
4953 ipr_build_ata_ioadl(ipr_cmd, qc);
4954 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4955 ipr_copy_sata_tf(regs, &qc->tf);
4956 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
4957 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4958
4959 switch (qc->tf.protocol) {
4960 case ATA_PROT_NODATA:
4961 case ATA_PROT_PIO:
4962 break;
4963
4964 case ATA_PROT_DMA:
4965 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
4966 break;
4967
4968 case ATA_PROT_ATAPI:
4969 case ATA_PROT_ATAPI_NODATA:
4970 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
4971 break;
4972
4973 case ATA_PROT_ATAPI_DMA:
4974 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
4975 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
4976 break;
4977
4978 default:
4979 WARN_ON(1);
4980 return -1;
4981 }
4982
4983 mb();
4984 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
4985 ioa_cfg->regs.ioarrin_reg);
4986 return 0;
4987}
4988
4989/**
4990 * ipr_ata_check_status - Return last ATA status
4991 * @ap: ATA port
4992 *
4993 * Return value:
4994 * ATA status
4995 **/
4996static u8 ipr_ata_check_status(struct ata_port *ap)
4997{
4998 struct ipr_sata_port *sata_port = ap->private_data;
4999 return sata_port->ioasa.status;
5000}
5001
5002/**
5003 * ipr_ata_check_altstatus - Return last ATA altstatus
5004 * @ap: ATA port
5005 *
5006 * Return value:
5007 * Alt ATA status
5008 **/
5009static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5010{
5011 struct ipr_sata_port *sata_port = ap->private_data;
5012 return sata_port->ioasa.alt_status;
5013}
5014
5015static struct ata_port_operations ipr_sata_ops = {
5016 .port_disable = ata_port_disable,
5017 .check_status = ipr_ata_check_status,
5018 .check_altstatus = ipr_ata_check_altstatus,
5019 .dev_select = ata_noop_dev_select,
5020 .phy_reset = ipr_ata_phy_reset,
5021 .post_internal_cmd = ipr_ata_post_internal,
5022 .tf_read = ipr_tf_read,
5023 .qc_prep = ata_noop_qc_prep,
5024 .qc_issue = ipr_qc_issue,
5025 .port_start = ata_sas_port_start,
5026 .port_stop = ata_sas_port_stop
5027};
5028
5029static struct ata_port_info sata_port_info = {
5030 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5031 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5032 .pio_mask = 0x10, /* pio4 */
5033 .mwdma_mask = 0x07,
5034 .udma_mask = 0x7f, /* udma0-6 */
5035 .port_ops = &ipr_sata_ops
5036};
5037
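A note on the three transfer-mode masks just above: libata encodes them one bit per mode, with bit 0 standing for mode 0 of each class, which is why the patch's own comments read 0x10 as "pio4" and 0x7f as "udma0-6". The throwaway user-space snippet below is not part of the patch and mode_mask() is a made-up helper; it only prints the encoding:

#include <stdio.h>

/* Illustration only: libata transfer-mode masks are one bit per mode,
 * LSB = mode 0.  mode_mask() is a made-up helper, not a kernel API. */
static unsigned int mode_mask(int highest_mode)
{
	return (1u << (highest_mode + 1)) - 1;	/* modes 0 .. highest_mode */
}

int main(void)
{
	printf("udma0-6   -> 0x%02x\n", mode_mask(6));	/* 0x7f */
	printf("mwdma0-2  -> 0x%02x\n", mode_mask(2));	/* 0x07 */
	printf("pio4 only -> 0x%02x\n", 1u << 4);	/* 0x10 */
	return 0;
}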
4394#ifdef CONFIG_PPC_PSERIES 5038#ifdef CONFIG_PPC_PSERIES
4395static const u16 ipr_blocked_processors[] = { 5039static const u16 ipr_blocked_processors[] = {
4396 PV_NORTHSTAR, 5040 PV_NORTHSTAR,
@@ -6352,7 +6996,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6352 struct Scsi_Host *host; 6996 struct Scsi_Host *host;
6353 unsigned long ipr_regs_pci; 6997 unsigned long ipr_regs_pci;
6354 void __iomem *ipr_regs; 6998 void __iomem *ipr_regs;
6355 u32 rc = PCIBIOS_SUCCESSFUL; 6999 int rc = PCIBIOS_SUCCESSFUL;
6356 volatile u32 mask, uproc; 7000 volatile u32 mask, uproc;
6357 7001
6358 ENTER; 7002 ENTER;
@@ -6374,6 +7018,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6374 7018
6375 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 7019 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6376 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 7020 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7021 host->transportt = &ipr_transport_template;
7022 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7023 sata_port_info.flags, &ipr_sata_ops);
6377 7024
6378 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id); 7025 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6379 7026
@@ -6749,7 +7396,7 @@ static int __init ipr_init(void)
6749 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 7396 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6750 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 7397 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6751 7398
6752 return pci_module_init(&ipr_driver); 7399 return pci_register_driver(&ipr_driver);
6753} 7400}
6754 7401
6755/** 7402/**
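The pci_module_init() -> pci_register_driver() change here repeats below in ips.c and nsp32.c: pci_module_init() was only a thin compatibility wrapper, and drivers are being converted to call pci_register_driver() directly and pair it with pci_unregister_driver() on exit. A minimal sketch of the resulting skeleton follows; every example_* identifier and the PCI IDs are hypothetical.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

/* Sketch only -- all example_* names and the PCI IDs are made up. */
static struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;		/* claim and set up the device here */
}

static void example_remove(struct pci_dev *pdev)
{
	/* undo whatever probe did */
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_pci_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");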
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 11eaff52432..6d035283af0 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -28,6 +28,7 @@
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/completion.h> 30#include <linux/completion.h>
31#include <linux/libata.h>
31#include <linux/list.h> 32#include <linux/list.h>
32#include <linux/kref.h> 33#include <linux/kref.h>
33#include <scsi/scsi.h> 34#include <scsi/scsi.h>
@@ -36,8 +37,8 @@
36/* 37/*
37 * Literals 38 * Literals
38 */ 39 */
39#define IPR_DRIVER_VERSION "2.1.4" 40#define IPR_DRIVER_VERSION "2.2.0"
40#define IPR_DRIVER_DATE "(August 2, 2006)" 41#define IPR_DRIVER_DATE "(September 25, 2006)"
41 42
42/* 43/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -849,6 +850,13 @@ struct ipr_bus_attributes {
849 u32 max_xfer_rate; 850 u32 max_xfer_rate;
850}; 851};
851 852
853struct ipr_sata_port {
854 struct ipr_ioa_cfg *ioa_cfg;
855 struct ata_port *ap;
856 struct ipr_resource_entry *res;
857 struct ipr_ioasa_gata ioasa;
858};
859
852struct ipr_resource_entry { 860struct ipr_resource_entry {
853 struct ipr_config_table_entry cfgte; 861 struct ipr_config_table_entry cfgte;
854 u8 needs_sync_complete:1; 862 u8 needs_sync_complete:1;
@@ -858,6 +866,7 @@ struct ipr_resource_entry {
858 u8 resetting_device:1; 866 u8 resetting_device:1;
859 867
860 struct scsi_device *sdev; 868 struct scsi_device *sdev;
869 struct ipr_sata_port *sata_port;
861 struct list_head queue; 870 struct list_head queue;
862}; 871};
863 872
@@ -928,10 +937,11 @@ struct ipr_trace_entry {
928 u32 time; 937 u32 time;
929 938
930 u8 op_code; 939 u8 op_code;
940 u8 ata_op_code;
931 u8 type; 941 u8 type;
932#define IPR_TRACE_START 0x00 942#define IPR_TRACE_START 0x00
933#define IPR_TRACE_FINISH 0xff 943#define IPR_TRACE_FINISH 0xff
934 u16 cmd_index; 944 u8 cmd_index;
935 945
936 __be32 res_handle; 946 __be32 res_handle;
937 union { 947 union {
@@ -1073,6 +1083,7 @@ struct ipr_ioa_cfg {
1073 1083
1074 struct ipr_cmnd *reset_cmd; 1084 struct ipr_cmnd *reset_cmd;
1075 1085
1086 struct ata_host ata_host;
1076 char ipr_cmd_label[8]; 1087 char ipr_cmd_label[8];
1077#define IPR_CMD_LABEL "ipr_cmnd" 1088#define IPR_CMD_LABEL "ipr_cmnd"
1078 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; 1089 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
@@ -1085,6 +1096,7 @@ struct ipr_cmnd {
1085 struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; 1096 struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
1086 struct list_head queue; 1097 struct list_head queue;
1087 struct scsi_cmnd *scsi_cmd; 1098 struct scsi_cmnd *scsi_cmd;
1099 struct ata_queued_cmd *qc;
1088 struct completion completion; 1100 struct completion completion;
1089 struct timer_list timer; 1101 struct timer_list timer;
1090 void (*done) (struct ipr_cmnd *); 1102 void (*done) (struct ipr_cmnd *);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 3c639286ec1..9a9ab297cf1 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -182,14 +182,8 @@
182#include <linux/dma-mapping.h> 182#include <linux/dma-mapping.h>
183 183
184#include <scsi/sg.h> 184#include <scsi/sg.h>
185
186#include "scsi.h" 185#include "scsi.h"
187
188#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
189#include "hosts.h"
190#else
191#include <scsi/scsi_host.h> 186#include <scsi/scsi_host.h>
192#endif
193 187
194#include "ips.h" 188#include "ips.h"
195 189
@@ -250,9 +244,9 @@ module_param(ips, charp, 0);
250 */ 244 */
251static int ips_detect(struct scsi_host_template *); 245static int ips_detect(struct scsi_host_template *);
252static int ips_release(struct Scsi_Host *); 246static int ips_release(struct Scsi_Host *);
253static int ips_eh_abort(Scsi_Cmnd *); 247static int ips_eh_abort(struct scsi_cmnd *);
254static int ips_eh_reset(Scsi_Cmnd *); 248static int ips_eh_reset(struct scsi_cmnd *);
255static int ips_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); 249static int ips_queue(struct scsi_cmnd *, void (*)(struct scsi_cmnd *));
256static const char *ips_info(struct Scsi_Host *); 250static const char *ips_info(struct Scsi_Host *);
257static irqreturn_t do_ipsintr(int, void *, struct pt_regs *); 251static irqreturn_t do_ipsintr(int, void *, struct pt_regs *);
258static int ips_hainit(ips_ha_t *); 252static int ips_hainit(ips_ha_t *);
@@ -325,24 +319,26 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
325static uint32_t ips_statupd_morpheus(ips_ha_t *); 319static uint32_t ips_statupd_morpheus(ips_ha_t *);
326static ips_scb_t *ips_getscb(ips_ha_t *); 320static ips_scb_t *ips_getscb(ips_ha_t *);
327static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); 321static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
328static void ips_putq_wait_tail(ips_wait_queue_t *, Scsi_Cmnd *); 322static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
329static void ips_putq_copp_tail(ips_copp_queue_t *, 323static void ips_putq_copp_tail(ips_copp_queue_t *,
330 ips_copp_wait_item_t *); 324 ips_copp_wait_item_t *);
331static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); 325static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
332static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); 326static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
333static Scsi_Cmnd *ips_removeq_wait_head(ips_wait_queue_t *); 327static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
334static Scsi_Cmnd *ips_removeq_wait(ips_wait_queue_t *, Scsi_Cmnd *); 328static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
329 struct scsi_cmnd *);
335static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, 330static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
336 ips_copp_wait_item_t *); 331 ips_copp_wait_item_t *);
337static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *); 332static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
338 333
339static int ips_is_passthru(Scsi_Cmnd *); 334static int ips_is_passthru(struct scsi_cmnd *);
340static int ips_make_passthru(ips_ha_t *, Scsi_Cmnd *, ips_scb_t *, int); 335static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
341static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *); 336static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
342static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *); 337static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
343static void ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, 338static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
344 unsigned int count); 339 unsigned int count);
345static void ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned int count); 340static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
341 unsigned int count);
346 342
347static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); 343static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
348static int ips_host_info(ips_ha_t *, char *, off_t, int); 344static int ips_host_info(ips_ha_t *, char *, off_t, int);
@@ -812,8 +808,7 @@ ips_halt(struct notifier_block *nb, ulong event, void *buf)
812/* Abort a command (using the new error code stuff) */ 808/* Abort a command (using the new error code stuff) */
813/* Note: this routine is called under the io_request_lock */ 809/* Note: this routine is called under the io_request_lock */
814/****************************************************************************/ 810/****************************************************************************/
815int 811int ips_eh_abort(struct scsi_cmnd *SC)
816ips_eh_abort(Scsi_Cmnd * SC)
817{ 812{
818 ips_ha_t *ha; 813 ips_ha_t *ha;
819 ips_copp_wait_item_t *item; 814 ips_copp_wait_item_t *item;
@@ -871,8 +866,7 @@ ips_eh_abort(Scsi_Cmnd * SC)
871/* NOTE: this routine is called under the io_request_lock spinlock */ 866/* NOTE: this routine is called under the io_request_lock spinlock */
872/* */ 867/* */
873/****************************************************************************/ 868/****************************************************************************/
874static int 869static int __ips_eh_reset(struct scsi_cmnd *SC)
875__ips_eh_reset(Scsi_Cmnd * SC)
876{ 870{
877 int ret; 871 int ret;
878 int i; 872 int i;
@@ -968,7 +962,7 @@ __ips_eh_reset(Scsi_Cmnd * SC)
968 ret = (*ha->func.reset) (ha); 962 ret = (*ha->func.reset) (ha);
969 963
970 if (!ret) { 964 if (!ret) {
971 Scsi_Cmnd *scsi_cmd; 965 struct scsi_cmnd *scsi_cmd;
972 966
973 IPS_PRINTK(KERN_NOTICE, ha->pcidev, 967 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
974 "Controller reset failed - controller now offline.\n"); 968 "Controller reset failed - controller now offline.\n");
@@ -997,7 +991,7 @@ __ips_eh_reset(Scsi_Cmnd * SC)
997 } 991 }
998 992
999 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { 993 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
1000 Scsi_Cmnd *scsi_cmd; 994 struct scsi_cmnd *scsi_cmd;
1001 995
1002 IPS_PRINTK(KERN_NOTICE, ha->pcidev, 996 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
1003 "Controller reset failed - controller now offline.\n"); 997 "Controller reset failed - controller now offline.\n");
@@ -1059,8 +1053,7 @@ __ips_eh_reset(Scsi_Cmnd * SC)
1059 1053
1060} 1054}
1061 1055
1062static int 1056static int ips_eh_reset(struct scsi_cmnd *SC)
1063ips_eh_reset(Scsi_Cmnd * SC)
1064{ 1057{
1065 int rc; 1058 int rc;
1066 1059
@@ -1083,8 +1076,7 @@ ips_eh_reset(Scsi_Cmnd * SC)
1083/* Linux obtains io_request_lock before calling this function */ 1076/* Linux obtains io_request_lock before calling this function */
1084/* */ 1077/* */
1085/****************************************************************************/ 1078/****************************************************************************/
1086static int 1079static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
1087ips_queue(Scsi_Cmnd * SC, void (*done) (Scsi_Cmnd *))
1088{ 1080{
1089 ips_ha_t *ha; 1081 ips_ha_t *ha;
1090 ips_passthru_t *pt; 1082 ips_passthru_t *pt;
@@ -1602,8 +1594,7 @@ ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
1602/* Determine if the specified SCSI command is really a passthru command */ 1594/* Determine if the specified SCSI command is really a passthru command */
1603/* */ 1595/* */
1604/****************************************************************************/ 1596/****************************************************************************/
1605static int 1597static int ips_is_passthru(struct scsi_cmnd *SC)
1606ips_is_passthru(Scsi_Cmnd * SC)
1607{ 1598{
1608 unsigned long flags; 1599 unsigned long flags;
1609 1600
@@ -1685,7 +1676,7 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1685/* */ 1676/* */
1686/****************************************************************************/ 1677/****************************************************************************/
1687static int 1678static int
1688ips_make_passthru(ips_ha_t * ha, Scsi_Cmnd * SC, ips_scb_t * scb, int intr) 1679ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1689{ 1680{
1690 ips_passthru_t *pt; 1681 ips_passthru_t *pt;
1691 int length = 0; 1682 int length = 0;
@@ -2734,9 +2725,9 @@ static void
2734ips_next(ips_ha_t * ha, int intr) 2725ips_next(ips_ha_t * ha, int intr)
2735{ 2726{
2736 ips_scb_t *scb; 2727 ips_scb_t *scb;
2737 Scsi_Cmnd *SC; 2728 struct scsi_cmnd *SC;
2738 Scsi_Cmnd *p; 2729 struct scsi_cmnd *p;
2739 Scsi_Cmnd *q; 2730 struct scsi_cmnd *q;
2740 ips_copp_wait_item_t *item; 2731 ips_copp_wait_item_t *item;
2741 int ret; 2732 int ret;
2742 unsigned long cpu_flags = 0; 2733 unsigned long cpu_flags = 0;
@@ -2847,7 +2838,7 @@ ips_next(ips_ha_t * ha, int intr)
2847 dcdb_active[scmd_channel(p) - 2838 dcdb_active[scmd_channel(p) -
2848 1] & (1 << scmd_id(p)))) { 2839 1] & (1 << scmd_id(p)))) {
2849 ips_freescb(ha, scb); 2840 ips_freescb(ha, scb);
2850 p = (Scsi_Cmnd *) p->host_scribble; 2841 p = (struct scsi_cmnd *) p->host_scribble;
2851 continue; 2842 continue;
2852 } 2843 }
2853 2844
@@ -2962,7 +2953,7 @@ ips_next(ips_ha_t * ha, int intr)
2962 break; 2953 break;
2963 } /* end case */ 2954 } /* end case */
2964 2955
2965 p = (Scsi_Cmnd *) p->host_scribble; 2956 p = (struct scsi_cmnd *) p->host_scribble;
2966 2957
2967 } /* end while */ 2958 } /* end while */
2968 2959
@@ -3090,8 +3081,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
3090/* ASSUMED to be called from within the HA lock */ 3081/* ASSUMED to be called from within the HA lock */
3091/* */ 3082/* */
3092/****************************************************************************/ 3083/****************************************************************************/
3093static void 3084static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
3094ips_putq_wait_tail(ips_wait_queue_t * queue, Scsi_Cmnd * item)
3095{ 3085{
3096 METHOD_TRACE("ips_putq_wait_tail", 1); 3086 METHOD_TRACE("ips_putq_wait_tail", 1);
3097 3087
@@ -3122,10 +3112,9 @@ ips_putq_wait_tail(ips_wait_queue_t * queue, Scsi_Cmnd * item)
3122/* ASSUMED to be called from within the HA lock */ 3112/* ASSUMED to be called from within the HA lock */
3123/* */ 3113/* */
3124/****************************************************************************/ 3114/****************************************************************************/
3125static Scsi_Cmnd * 3115static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
3126ips_removeq_wait_head(ips_wait_queue_t * queue)
3127{ 3116{
3128 Scsi_Cmnd *item; 3117 struct scsi_cmnd *item;
3129 3118
3130 METHOD_TRACE("ips_removeq_wait_head", 1); 3119 METHOD_TRACE("ips_removeq_wait_head", 1);
3131 3120
@@ -3135,7 +3124,7 @@ ips_removeq_wait_head(ips_wait_queue_t * queue)
3135 return (NULL); 3124 return (NULL);
3136 } 3125 }
3137 3126
3138 queue->head = (Scsi_Cmnd *) item->host_scribble; 3127 queue->head = (struct scsi_cmnd *) item->host_scribble;
3139 item->host_scribble = NULL; 3128 item->host_scribble = NULL;
3140 3129
3141 if (queue->tail == item) 3130 if (queue->tail == item)
@@ -3157,10 +3146,10 @@ ips_removeq_wait_head(ips_wait_queue_t * queue)
3157/* ASSUMED to be called from within the HA lock */ 3146/* ASSUMED to be called from within the HA lock */
3158/* */ 3147/* */
3159/****************************************************************************/ 3148/****************************************************************************/
3160static Scsi_Cmnd * 3149static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
3161ips_removeq_wait(ips_wait_queue_t * queue, Scsi_Cmnd * item) 3150 struct scsi_cmnd *item)
3162{ 3151{
3163 Scsi_Cmnd *p; 3152 struct scsi_cmnd *p;
3164 3153
3165 METHOD_TRACE("ips_removeq_wait", 1); 3154 METHOD_TRACE("ips_removeq_wait", 1);
3166 3155
@@ -3173,8 +3162,8 @@ ips_removeq_wait(ips_wait_queue_t * queue, Scsi_Cmnd * item)
3173 3162
3174 p = queue->head; 3163 p = queue->head;
3175 3164
3176 while ((p) && (item != (Scsi_Cmnd *) p->host_scribble)) 3165 while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
3177 p = (Scsi_Cmnd *) p->host_scribble; 3166 p = (struct scsi_cmnd *) p->host_scribble;
3178 3167
3179 if (p) { 3168 if (p) {
3180 /* found a match */ 3169 /* found a match */
@@ -3659,11 +3648,10 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3659/* Routine Name: ips_scmd_buf_write */ 3648/* Routine Name: ips_scmd_buf_write */
3660/* */ 3649/* */
3661/* Routine Description: */ 3650/* Routine Description: */
3662/* Write data to Scsi_Cmnd request_buffer at proper offsets */ 3651/* Write data to struct scsi_cmnd request_buffer at proper offsets */
3663/****************************************************************************/ 3652/****************************************************************************/
3664static void 3653static void
3665ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, unsigned 3654ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3666 int count)
3667{ 3655{
3668 if (scmd->use_sg) { 3656 if (scmd->use_sg) {
3669 int i; 3657 int i;
@@ -3698,11 +3686,10 @@ ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, unsigned
3698/* Routine Name: ips_scmd_buf_read */ 3686/* Routine Name: ips_scmd_buf_read */
3699/* */ 3687/* */
3700/* Routine Description: */ 3688/* Routine Description: */
3701/* Copy data from a Scsi_Cmnd to a new, linear buffer */ 3689/* Copy data from a struct scsi_cmnd to a new, linear buffer */
3702/****************************************************************************/ 3690/****************************************************************************/
3703static void 3691static void
3704ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned 3692ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
3705 int count)
3706{ 3693{
3707 if (scmd->use_sg) { 3694 if (scmd->use_sg) {
3708 int i; 3695 int i;
@@ -7078,7 +7065,7 @@ ips_remove_device(struct pci_dev *pci_dev)
7078static int __init 7065static int __init
7079ips_module_init(void) 7066ips_module_init(void)
7080{ 7067{
7081 if (pci_module_init(&ips_pci_driver) < 0) 7068 if (pci_register_driver(&ips_pci_driver) < 0)
7082 return -ENODEV; 7069 return -ENODEV;
7083 ips_driver_template.module = THIS_MODULE; 7070 ips_driver_template.module = THIS_MODULE;
7084 ips_order_controllers(); 7071 ips_order_controllers();
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index f46c382e559..34680f3dd45 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -6,7 +6,7 @@
6/* David Jeffery, Adaptec, Inc. */ 6/* David Jeffery, Adaptec, Inc. */
7/* */ 7/* */
8/* Copyright (C) 1999 IBM Corporation */ 8/* Copyright (C) 1999 IBM Corporation */
9/* Copyright (C) 2003 Adaptec, Inc. */ 9/* Copyright (C) 2003 Adaptec, Inc. */
10/* */ 10/* */
11/* This program is free software; you can redistribute it and/or modify */ 11/* This program is free software; you can redistribute it and/or modify */
12/* it under the terms of the GNU General Public License as published by */ 12/* it under the terms of the GNU General Public License as published by */
@@ -1033,14 +1033,14 @@ typedef struct ips_scb_queue {
1033 * Wait queue_format 1033 * Wait queue_format
1034 */ 1034 */
1035typedef struct ips_wait_queue { 1035typedef struct ips_wait_queue {
1036 Scsi_Cmnd *head; 1036 struct scsi_cmnd *head;
1037 Scsi_Cmnd *tail; 1037 struct scsi_cmnd *tail;
1038 int count; 1038 int count;
1039} ips_wait_queue_t; 1039} ips_wait_queue_t;
1040 1040
1041typedef struct ips_copp_wait_item { 1041typedef struct ips_copp_wait_item {
1042 Scsi_Cmnd *scsi_cmd; 1042 struct scsi_cmnd *scsi_cmd;
1043 struct ips_copp_wait_item *next; 1043 struct ips_copp_wait_item *next;
1044} ips_copp_wait_item_t; 1044} ips_copp_wait_item_t;
1045 1045
1046typedef struct ips_copp_queue { 1046typedef struct ips_copp_queue {
@@ -1149,7 +1149,7 @@ typedef struct ips_scb {
1149 uint32_t flags; 1149 uint32_t flags;
1150 uint32_t op_code; 1150 uint32_t op_code;
1151 IPS_SG_LIST sg_list; 1151 IPS_SG_LIST sg_list;
1152 Scsi_Cmnd *scsi_cmd; 1152 struct scsi_cmnd *scsi_cmd;
1153 struct ips_scb *q_next; 1153 struct ips_scb *q_next;
1154 ips_scb_callback callback; 1154 ips_scb_callback callback;
1155 uint32_t sg_busaddr; 1155 uint32_t sg_busaddr;
@@ -1175,7 +1175,7 @@ typedef struct ips_scb_pt {
1175 uint32_t flags; 1175 uint32_t flags;
1176 uint32_t op_code; 1176 uint32_t op_code;
1177 IPS_SG_LIST *sg_list; 1177 IPS_SG_LIST *sg_list;
1178 Scsi_Cmnd *scsi_cmd; 1178 struct scsi_cmnd *scsi_cmd;
1179 struct ips_scb *q_next; 1179 struct ips_scb *q_next;
1180 ips_scb_callback callback; 1180 ips_scb_callback callback;
1181} ips_scb_pt_t; 1181} ips_scb_pt_t;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4cdf3464267..a5723ad0a09 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -389,7 +389,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
389 389
390 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 390 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
391 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 391 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
392 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) { 392 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
393 if (rc != MBX_SUCCESS) {
393 lpfc_printf_log(phba, 394 lpfc_printf_log(phba,
394 KERN_ERR, 395 KERN_ERR,
395 LOG_INIT, 396 LOG_INIT,
@@ -406,7 +407,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
406 readl(phba->HAregaddr); /* flush */ 407 readl(phba->HAregaddr); /* flush */
407 408
408 phba->hba_state = LPFC_HBA_ERROR; 409 phba->hba_state = LPFC_HBA_ERROR;
409 mempool_free(pmb, phba->mbox_mem_pool); 410 if (rc != MBX_BUSY)
411 mempool_free(pmb, phba->mbox_mem_pool);
410 return -EIO; 412 return -EIO;
411 } 413 }
412 /* MBOX buffer will be freed in mbox compl */ 414 /* MBOX buffer will be freed in mbox compl */
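Why the rc != MBX_BUSY test matters: with MBX_NOWAIT, lpfc_sli_issue_mbox() can come back MBX_BUSY when the mailbox could not be issued immediately but was left queued to the SLI layer, in which case the completion handler is presumably the one that will free it (as the comment on the success path already notes for the normal case); unconditionally freeing it on this error path, as the old code did, would risk a double free. Only failures other than MBX_BUSY still release the buffer here.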
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 8cd0bd1d0f7..b50e27e6602 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -175,7 +175,7 @@ typedef struct {
175 uint8_t max_lun; 175 uint8_t max_lun;
176 176
177 uint32_t unique_id; 177 uint32_t unique_id;
178 uint8_t irq; 178 int irq;
179 uint8_t ito; 179 uint8_t ito;
180 caddr_t ibuf; 180 caddr_t ibuf;
181 dma_addr_t ibuf_dma_h; 181 dma_addr_t ibuf_dma_h;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 4cab5b534b2..977b6e8d852 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.03.01 13 * Version : v00.00.03.05
14 * 14 *
15 * Authors: 15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
@@ -71,6 +71,8 @@ static struct megasas_mgmt_info megasas_mgmt_info;
71static struct fasync_struct *megasas_async_queue; 71static struct fasync_struct *megasas_async_queue;
72static DEFINE_MUTEX(megasas_async_queue_mutex); 72static DEFINE_MUTEX(megasas_async_queue_mutex);
73 73
74static u32 megasas_dbg_lvl;
75
74/** 76/**
75 * megasas_get_cmd - Get a command from the free pool 77 * megasas_get_cmd - Get a command from the free pool
76 * @instance: Adapter soft state 78 * @instance: Adapter soft state
@@ -135,6 +137,19 @@ megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
135} 137}
136 138
137/** 139/**
 140 * megasas_disable_intr_xscale - Disables interrupt
141 * @regs: MFI register set
142 */
143static inline void
144megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs)
145{
146 u32 mask = 0x1f;
147 writel(mask, &regs->outbound_intr_mask);
148 /* Dummy readl to force pci flush */
149 readl(&regs->outbound_intr_mask);
150}
151
152/**
138 * megasas_read_fw_status_reg_xscale - returns the current FW status value 153 * megasas_read_fw_status_reg_xscale - returns the current FW status value
139 * @regs: MFI register set 154 * @regs: MFI register set
140 */ 155 */
@@ -185,6 +200,7 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
185 200
186 .fire_cmd = megasas_fire_cmd_xscale, 201 .fire_cmd = megasas_fire_cmd_xscale,
187 .enable_intr = megasas_enable_intr_xscale, 202 .enable_intr = megasas_enable_intr_xscale,
203 .disable_intr = megasas_disable_intr_xscale,
188 .clear_intr = megasas_clear_intr_xscale, 204 .clear_intr = megasas_clear_intr_xscale,
189 .read_fw_status_reg = megasas_read_fw_status_reg_xscale, 205 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
190}; 206};
@@ -215,6 +231,19 @@ megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
215} 231}
216 232
217/** 233/**
234 * megasas_disable_intr_ppc - Disable interrupt
235 * @regs: MFI register set
236 */
237static inline void
238megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs)
239{
240 u32 mask = 0xFFFFFFFF;
241 writel(mask, &regs->outbound_intr_mask);
242 /* Dummy readl to force pci flush */
243 readl(&regs->outbound_intr_mask);
244}
245
246/**
218 * megasas_read_fw_status_reg_ppc - returns the current FW status value 247 * megasas_read_fw_status_reg_ppc - returns the current FW status value
219 * @regs: MFI register set 248 * @regs: MFI register set
220 */ 249 */
@@ -265,6 +294,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
265 294
266 .fire_cmd = megasas_fire_cmd_ppc, 295 .fire_cmd = megasas_fire_cmd_ppc,
267 .enable_intr = megasas_enable_intr_ppc, 296 .enable_intr = megasas_enable_intr_ppc,
297 .disable_intr = megasas_disable_intr_ppc,
268 .clear_intr = megasas_clear_intr_ppc, 298 .clear_intr = megasas_clear_intr_ppc,
269 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 299 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
270}; 300};
@@ -275,25 +305,6 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
275*/ 305*/
276 306
277/** 307/**
278 * megasas_disable_intr - Disables interrupts
279 * @regs: MFI register set
280 */
281static inline void
282megasas_disable_intr(struct megasas_instance *instance)
283{
284 u32 mask = 0x1f;
285 struct megasas_register_set __iomem *regs = instance->reg_set;
286
287 if(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078R)
288 mask = 0xffffffff;
289
290 writel(mask, &regs->outbound_intr_mask);
291
292 /* Dummy readl to force pci flush */
293 readl(&regs->outbound_intr_mask);
294}
295
296/**
297 * megasas_issue_polled - Issues a polling command 308 * megasas_issue_polled - Issues a polling command
298 * @instance: Adapter soft state 309 * @instance: Adapter soft state
299 * @cmd: Command packet to be issued 310 * @cmd: Command packet to be issued
@@ -336,6 +347,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
336 * @cmd: Command to be issued 347 * @cmd: Command to be issued
337 * 348 *
338 * This function waits on an event for the command to be returned from ISR. 349 * This function waits on an event for the command to be returned from ISR.
350 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
339 * Used to issue ioctl commands. 351 * Used to issue ioctl commands.
340 */ 352 */
341static int 353static int
@@ -346,7 +358,8 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
346 358
347 instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); 359 instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
348 360
349 wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA)); 361 wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA),
362 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
350 363
351 return 0; 364 return 0;
352} 365}
@@ -358,7 +371,8 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
358 * 371 *
 359 * MFI firmware can abort previously issued AEN command (automatic event 372 * MFI firmware can abort previously issued AEN command (automatic event
360 * notification). The megasas_issue_blocked_abort_cmd() issues such abort 373 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
361 * cmd and blocks till it is completed. 374 * cmd and waits for return status.
375 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
362 */ 376 */
363static int 377static int
364megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, 378megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
@@ -392,7 +406,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
392 /* 406 /*
393 * Wait for this cmd to complete 407 * Wait for this cmd to complete
394 */ 408 */
395 wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF)); 409 wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF),
410 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
396 411
397 megasas_return_cmd(instance, cmd); 412 megasas_return_cmd(instance, cmd);
398 return 0; 413 return 0;
@@ -495,6 +510,46 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
495 return sge_count; 510 return sge_count;
496} 511}
497 512
513 /**
514 * megasas_get_frame_count - Computes the number of frames
515 * @sge_count : number of sg elements
516 *
 517 * Returns the number of frames required for number of sge's (sge_count)
518 */
519
520u32 megasas_get_frame_count(u8 sge_count)
521{
522 int num_cnt;
523 int sge_bytes;
524 u32 sge_sz;
525 u32 frame_count=0;
526
527 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
528 sizeof(struct megasas_sge32);
529
530 /*
531 * Main frame can contain 2 SGEs for 64-bit SGLs and
532 * 3 SGEs for 32-bit SGLs
533 */
534 if (IS_DMA64)
535 num_cnt = sge_count - 2;
536 else
537 num_cnt = sge_count - 3;
538
539 if(num_cnt>0){
540 sge_bytes = sge_sz * num_cnt;
541
542 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
543 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
544 }
545 /* Main frame */
546 frame_count +=1;
547
548 if (frame_count > 7)
549 frame_count = 8;
550 return frame_count;
551}
552
498/** 553/**
499 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command 554 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
500 * @instance: Adapter soft state 555 * @instance: Adapter soft state
@@ -508,8 +563,6 @@ static int
508megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 563megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
509 struct megasas_cmd *cmd) 564 struct megasas_cmd *cmd)
510{ 565{
511 u32 sge_sz;
512 int sge_bytes;
513 u32 is_logical; 566 u32 is_logical;
514 u32 device_id; 567 u32 device_id;
515 u16 flags = 0; 568 u16 flags = 0;
@@ -544,9 +597,6 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
544 /* 597 /*
545 * Construct SGL 598 * Construct SGL
546 */ 599 */
547 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
548 sizeof(struct megasas_sge32);
549
550 if (IS_DMA64) { 600 if (IS_DMA64) {
551 pthru->flags |= MFI_FRAME_SGL64; 601 pthru->flags |= MFI_FRAME_SGL64;
552 pthru->sge_count = megasas_make_sgl64(instance, scp, 602 pthru->sge_count = megasas_make_sgl64(instance, scp,
@@ -562,17 +612,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
562 pthru->sense_buf_phys_addr_hi = 0; 612 pthru->sense_buf_phys_addr_hi = 0;
563 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 613 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
564 614
565 sge_bytes = sge_sz * pthru->sge_count;
566
567 /* 615 /*
568 * Compute the total number of frames this command consumes. FW uses 616 * Compute the total number of frames this command consumes. FW uses
569 * this number to pull sufficient number of frames from host memory. 617 * this number to pull sufficient number of frames from host memory.
570 */ 618 */
571 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + 619 cmd->frame_count = megasas_get_frame_count(pthru->sge_count);
572 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
573
574 if (cmd->frame_count > 7)
575 cmd->frame_count = 8;
576 620
577 return cmd->frame_count; 621 return cmd->frame_count;
578} 622}
@@ -589,8 +633,6 @@ static int
589megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 633megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
590 struct megasas_cmd *cmd) 634 struct megasas_cmd *cmd)
591{ 635{
592 u32 sge_sz;
593 int sge_bytes;
594 u32 device_id; 636 u32 device_id;
595 u8 sc = scp->cmnd[0]; 637 u8 sc = scp->cmnd[0];
596 u16 flags = 0; 638 u16 flags = 0;
@@ -605,7 +647,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
605 flags = MFI_FRAME_DIR_READ; 647 flags = MFI_FRAME_DIR_READ;
606 648
607 /* 649 /*
608 * Preare the Logical IO frame: 2nd bit is zero for all read cmds 650 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
609 */ 651 */
610 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 652 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
611 ldio->cmd_status = 0x0; 653 ldio->cmd_status = 0x0;
@@ -674,9 +716,6 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
674 /* 716 /*
675 * Construct SGL 717 * Construct SGL
676 */ 718 */
677 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
678 sizeof(struct megasas_sge32);
679
680 if (IS_DMA64) { 719 if (IS_DMA64) {
681 ldio->flags |= MFI_FRAME_SGL64; 720 ldio->flags |= MFI_FRAME_SGL64;
682 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 721 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
@@ -690,13 +729,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
690 ldio->sense_buf_phys_addr_hi = 0; 729 ldio->sense_buf_phys_addr_hi = 0;
691 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 730 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
692 731
693 sge_bytes = sge_sz * ldio->sge_count; 732 /*
694 733 * Compute the total number of frames this command consumes. FW uses
695 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + 734 * this number to pull sufficient number of frames from host memory.
696 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; 735 */
697 736 cmd->frame_count = megasas_get_frame_count(ldio->sge_count);
698 if (cmd->frame_count > 7)
699 cmd->frame_count = 8;
700 737
701 return cmd->frame_count; 738 return cmd->frame_count;
702} 739}
@@ -727,6 +764,69 @@ static inline int megasas_is_ldio(struct scsi_cmnd *cmd)
727 } 764 }
728} 765}
729 766
767 /**
768 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
769 * in FW
770 * @instance: Adapter soft state
771 */
772static inline void
773megasas_dump_pending_frames(struct megasas_instance *instance)
774{
775 struct megasas_cmd *cmd;
776 int i,n;
777 union megasas_sgl *mfi_sgl;
778 struct megasas_io_frame *ldio;
779 struct megasas_pthru_frame *pthru;
780 u32 sgcount;
781 u32 max_cmd = instance->max_fw_cmds;
782
783 printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
784 printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
785 if (IS_DMA64)
786 printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
787 else
788 printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
789
790 printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
791 for (i = 0; i < max_cmd; i++) {
792 cmd = instance->cmd_list[i];
793 if(!cmd->scmd)
794 continue;
795 printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
796 if (megasas_is_ldio(cmd->scmd)){
797 ldio = (struct megasas_io_frame *)cmd->frame;
798 mfi_sgl = &ldio->sgl;
799 sgcount = ldio->sge_count;
800 printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
801 }
802 else {
803 pthru = (struct megasas_pthru_frame *) cmd->frame;
804 mfi_sgl = &pthru->sgl;
805 sgcount = pthru->sge_count;
806 printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
807 }
808 if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
809 for (n = 0; n < sgcount; n++){
810 if (IS_DMA64)
811 printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
812 else
813 printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
814 }
815 }
816 printk(KERN_ERR "\n");
817 } /*for max_cmd*/
818 printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
819 for (i = 0; i < max_cmd; i++) {
820
821 cmd = instance->cmd_list[i];
822
823 if(cmd->sync_cmd == 1){
824 printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
825 }
826 }
827 printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
828}
829
730/** 830/**
731 * megasas_queue_command - Queue entry point 831 * megasas_queue_command - Queue entry point
732 * @scmd: SCSI command to be queued 832 * @scmd: SCSI command to be queued
@@ -832,6 +932,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
832 } 932 }
833 933
834 if (atomic_read(&instance->fw_outstanding)) { 934 if (atomic_read(&instance->fw_outstanding)) {
935 /*
936 * Send signal to FW to stop processing any pending cmds.
937 * The controller will be taken offline by the OS now.
938 */
939 writel(MFI_STOP_ADP,
940 &instance->reg_set->inbound_doorbell);
941 megasas_dump_pending_frames(instance);
835 instance->hw_crit_error = 1; 942 instance->hw_crit_error = 1;
836 return FAILED; 943 return FAILED;
837 } 944 }
@@ -1168,11 +1275,6 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1168static int 1275static int
1169megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) 1276megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
1170{ 1277{
1171 u32 producer;
1172 u32 consumer;
1173 u32 context;
1174 struct megasas_cmd *cmd;
1175
1176 /* 1278 /*
1177 * Check if it is our interrupt 1279 * Check if it is our interrupt
1178 * Clear the interrupt 1280 * Clear the interrupt
@@ -1180,23 +1282,10 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
1180 if(instance->instancet->clear_intr(instance->reg_set)) 1282 if(instance->instancet->clear_intr(instance->reg_set))
1181 return IRQ_NONE; 1283 return IRQ_NONE;
1182 1284
1183 producer = *instance->producer; 1285 /*
1184 consumer = *instance->consumer; 1286 * Schedule the tasklet for cmd completion
1185 1287 */
1186 while (consumer != producer) { 1288 tasklet_schedule(&instance->isr_tasklet);
1187 context = instance->reply_queue[consumer];
1188
1189 cmd = instance->cmd_list[context];
1190
1191 megasas_complete_cmd(instance, cmd, alt_status);
1192
1193 consumer++;
1194 if (consumer == (instance->max_fw_cmds + 1)) {
1195 consumer = 0;
1196 }
1197 }
1198
1199 *instance->consumer = producer;
1200 1289
1201 return IRQ_HANDLED; 1290 return IRQ_HANDLED;
1202} 1291}
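The rest of the tasklet plumbing is spread over later hunks -- megasas_complete_cmd_dpc() drains the producer/consumer reply queue, megasas_init_mfi() creates the tasklet, and megasas_detach_one() kills it -- so the whole lifecycle, sketched here for orientation using the driver's own fields, is:

	/* once, during adapter init (megasas_init_mfi) */
	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
		     (unsigned long)instance);

	/* from the interrupt handler, after the interrupt is acknowledged */
	tasklet_schedule(&instance->isr_tasklet);

	/* on device removal, after the host is torn down (megasas_detach_one) */
	tasklet_kill(&instance->isr_tasklet);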
@@ -1229,10 +1318,12 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1229 1318
1230 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 1319 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
1231 1320
1321 if (fw_state != MFI_STATE_READY)
1322 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
1323 " state\n");
1324
1232 while (fw_state != MFI_STATE_READY) { 1325 while (fw_state != MFI_STATE_READY) {
1233 1326
1234 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
1235 " state\n");
1236 switch (fw_state) { 1327 switch (fw_state) {
1237 1328
1238 case MFI_STATE_FAULT: 1329 case MFI_STATE_FAULT:
@@ -1244,19 +1335,27 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1244 /* 1335 /*
1245 * Set the CLR bit in inbound doorbell 1336 * Set the CLR bit in inbound doorbell
1246 */ 1337 */
1247 writel(MFI_INIT_CLEAR_HANDSHAKE, 1338 writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
1248 &instance->reg_set->inbound_doorbell); 1339 &instance->reg_set->inbound_doorbell);
1249 1340
1250 max_wait = 2; 1341 max_wait = 2;
1251 cur_state = MFI_STATE_WAIT_HANDSHAKE; 1342 cur_state = MFI_STATE_WAIT_HANDSHAKE;
1252 break; 1343 break;
1253 1344
1345 case MFI_STATE_BOOT_MESSAGE_PENDING:
1346 writel(MFI_INIT_HOTPLUG,
1347 &instance->reg_set->inbound_doorbell);
1348
1349 max_wait = 10;
1350 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
1351 break;
1352
1254 case MFI_STATE_OPERATIONAL: 1353 case MFI_STATE_OPERATIONAL:
1255 /* 1354 /*
1256 * Bring it to READY state; assuming max wait 2 secs 1355 * Bring it to READY state; assuming max wait 10 secs
1257 */ 1356 */
1258 megasas_disable_intr(instance); 1357 instance->instancet->disable_intr(instance->reg_set);
1259 writel(MFI_INIT_READY, &instance->reg_set->inbound_doorbell); 1358 writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell);
1260 1359
1261 max_wait = 10; 1360 max_wait = 10;
1262 cur_state = MFI_STATE_OPERATIONAL; 1361 cur_state = MFI_STATE_OPERATIONAL;
@@ -1323,6 +1422,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1323 return -ENODEV; 1422 return -ENODEV;
1324 } 1423 }
1325 }; 1424 };
1425 printk(KERN_INFO "megasas: FW now in Ready state\n");
1326 1426
1327 return 0; 1427 return 0;
1328} 1428}
@@ -1352,7 +1452,7 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance)
1352 cmd->frame_phys_addr); 1452 cmd->frame_phys_addr);
1353 1453
1354 if (cmd->sense) 1454 if (cmd->sense)
1355 pci_pool_free(instance->sense_dma_pool, cmd->frame, 1455 pci_pool_free(instance->sense_dma_pool, cmd->sense,
1356 cmd->sense_phys_addr); 1456 cmd->sense_phys_addr);
1357 } 1457 }
1358 1458
@@ -1628,6 +1728,39 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
1628} 1728}
1629 1729
1630/** 1730/**
 1731 * megasas_complete_cmd_dpc - Completes outstanding commands from the reply queue
1732 * @instance_addr: Address of adapter soft state
1733 *
1734 * Tasklet to complete cmds
1735 */
1736void megasas_complete_cmd_dpc(unsigned long instance_addr)
1737{
1738 u32 producer;
1739 u32 consumer;
1740 u32 context;
1741 struct megasas_cmd *cmd;
1742 struct megasas_instance *instance = (struct megasas_instance *)instance_addr;
1743
1744 producer = *instance->producer;
1745 consumer = *instance->consumer;
1746
1747 while (consumer != producer) {
1748 context = instance->reply_queue[consumer];
1749
1750 cmd = instance->cmd_list[context];
1751
1752 megasas_complete_cmd(instance, cmd, DID_OK);
1753
1754 consumer++;
1755 if (consumer == (instance->max_fw_cmds + 1)) {
1756 consumer = 0;
1757 }
1758 }
1759
1760 *instance->consumer = producer;
1761}
1762
1763/**
1631 * megasas_init_mfi - Initializes the FW 1764 * megasas_init_mfi - Initializes the FW
1632 * @instance: Adapter soft state 1765 * @instance: Adapter soft state
1633 * 1766 *
@@ -1690,6 +1823,12 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1690 * Get various operational parameters from status register 1823 * Get various operational parameters from status register
1691 */ 1824 */
1692 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 1825 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
1826 /*
1827 * Reduce the max supported cmds by 1. This is to ensure that the
1828 * reply_q_sz (1 more than the max cmd that driver may send)
1829 * does not exceed max cmds that the FW can support
1830 */
1831 instance->max_fw_cmds = instance->max_fw_cmds-1;
1693 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 1832 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
1694 0x10; 1833 0x10;
1695 /* 1834 /*
@@ -1754,7 +1893,7 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1754 /* 1893 /*
1755 * disable the intr before firing the init frame to FW 1894 * disable the intr before firing the init frame to FW
1756 */ 1895 */
1757 megasas_disable_intr(instance); 1896 instance->instancet->disable_intr(instance->reg_set);
1758 1897
1759 /* 1898 /*
1760 * Issue the init frame in polled mode 1899 * Issue the init frame in polled mode
@@ -1791,6 +1930,12 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1791 1930
1792 kfree(ctrl_info); 1931 kfree(ctrl_info);
1793 1932
1933 /*
1934 * Setup tasklet for cmd completion
1935 */
1936
1937 tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
1938 (unsigned long)instance);
1794 return 0; 1939 return 0;
1795 1940
1796 fail_fw_init: 1941 fail_fw_init:
@@ -2182,6 +2327,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2182 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 2327 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
2183 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 2328 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
2184 2329
2330 megasas_dbg_lvl = 0;
2331
2185 /* 2332 /*
2186 * Initialize MFI Firmware 2333 * Initialize MFI Firmware
2187 */ 2334 */
@@ -2234,7 +2381,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2234 megasas_mgmt_info.max_index--; 2381 megasas_mgmt_info.max_index--;
2235 2382
2236 pci_set_drvdata(pdev, NULL); 2383 pci_set_drvdata(pdev, NULL);
2237 megasas_disable_intr(instance); 2384 instance->instancet->disable_intr(instance->reg_set);
2238 free_irq(instance->pdev->irq, instance); 2385 free_irq(instance->pdev->irq, instance);
2239 2386
2240 megasas_release_mfi(instance); 2387 megasas_release_mfi(instance);
@@ -2348,6 +2495,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
2348 scsi_remove_host(instance->host); 2495 scsi_remove_host(instance->host);
2349 megasas_flush_cache(instance); 2496 megasas_flush_cache(instance);
2350 megasas_shutdown_controller(instance); 2497 megasas_shutdown_controller(instance);
2498 tasklet_kill(&instance->isr_tasklet);
2351 2499
2352 /* 2500 /*
2353 * Take the instance off the instance array. Note that we will not 2501 * Take the instance off the instance array. Note that we will not
@@ -2364,7 +2512,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
2364 2512
2365 pci_set_drvdata(instance->pdev, NULL); 2513 pci_set_drvdata(instance->pdev, NULL);
2366 2514
2367 megasas_disable_intr(instance); 2515 instance->instancet->disable_intr(instance->reg_set);
2368 2516
2369 free_irq(instance->pdev->irq, instance); 2517 free_irq(instance->pdev->irq, instance);
2370 2518
@@ -2716,7 +2864,8 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
2716 int i; 2864 int i;
2717 int error = 0; 2865 int error = 0;
2718 2866
2719 clear_user(ioc, sizeof(*ioc)); 2867 if (clear_user(ioc, sizeof(*ioc)))
2868 return -EFAULT;
2720 2869
2721 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 2870 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
2722 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 2871 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
@@ -2808,6 +2957,26 @@ megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
2808static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, 2957static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
2809 NULL); 2958 NULL);
2810 2959
2960static ssize_t
2961megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
2962{
2963 return sprintf(buf,"%u",megasas_dbg_lvl);
2964}
2965
2966static ssize_t
2967megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
2968{
2969 int retval = count;
2970 if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
2971 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
2972 retval = -EINVAL;
2973 }
2974 return retval;
2975}
2976
2977static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
2978 megasas_sysfs_set_dbg_lvl);
2979
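Together with the module-global megasas_dbg_lvl added earlier, this gives a run-time debug knob: the attribute appears under the driver's sysfs directory (for a PCI driver named "megaraid_sas" that would normally be /sys/bus/pci/drivers/megaraid_sas/dbg_lvl -- the path is inferred from the driver core, not spelled out in this patch), and any value with bit 0 set (MEGASAS_DBG_LVL) enables the per-SGE output in megasas_dump_pending_frames().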
2811/** 2980/**
2812 * megasas_init - Driver load entry point 2981 * megasas_init - Driver load entry point
2813 */ 2982 */
@@ -2842,14 +3011,33 @@ static int __init megasas_init(void)
2842 3011
2843 if (rval) { 3012 if (rval) {
2844 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); 3013 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n");
2845 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 3014 goto err_pcidrv;
2846 } 3015 }
2847 3016
2848 driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); 3017 rval = driver_create_file(&megasas_pci_driver.driver,
2849 driver_create_file(&megasas_pci_driver.driver, 3018 &driver_attr_version);
2850 &driver_attr_release_date); 3019 if (rval)
3020 goto err_dcf_attr_ver;
3021 rval = driver_create_file(&megasas_pci_driver.driver,
3022 &driver_attr_release_date);
3023 if (rval)
3024 goto err_dcf_rel_date;
3025 rval = driver_create_file(&megasas_pci_driver.driver,
3026 &driver_attr_dbg_lvl);
3027 if (rval)
3028 goto err_dcf_dbg_lvl;
2851 3029
2852 return rval; 3030 return rval;
3031err_dcf_dbg_lvl:
3032 driver_remove_file(&megasas_pci_driver.driver,
3033 &driver_attr_release_date);
3034err_dcf_rel_date:
3035 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
3036err_dcf_attr_ver:
3037 pci_unregister_driver(&megasas_pci_driver);
3038err_pcidrv:
3039 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
3040 return rval;
2853} 3041}
2854 3042
2855/** 3043/**
@@ -2857,9 +3045,11 @@ static int __init megasas_init(void)
2857 */ 3045 */
2858static void __exit megasas_exit(void) 3046static void __exit megasas_exit(void)
2859{ 3047{
2860 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 3048 driver_remove_file(&megasas_pci_driver.driver,
3049 &driver_attr_dbg_lvl);
2861 driver_remove_file(&megasas_pci_driver.driver, 3050 driver_remove_file(&megasas_pci_driver.driver,
2862 &driver_attr_release_date); 3051 &driver_attr_release_date);
3052 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
2863 3053
2864 pci_unregister_driver(&megasas_pci_driver); 3054 pci_unregister_driver(&megasas_pci_driver);
2865 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 3055 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3531a14222a..55eddcf8eb1 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/** 18/**
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.03.01" 21#define MEGASAS_VERSION "00.00.03.05"
22#define MEGASAS_RELDATE "May 14, 2006" 22#define MEGASAS_RELDATE "Oct 02, 2006"
23#define MEGASAS_EXT_VERSION "Sun May 14 22:49:52 PDT 2006" 23#define MEGASAS_EXT_VERSION "Mon Oct 02 11:21:32 PDT 2006"
24 24
25/* 25/*
26 * Device IDs 26 * Device IDs
@@ -50,6 +50,7 @@
50#define MFI_STATE_WAIT_HANDSHAKE 0x60000000 50#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
51#define MFI_STATE_FW_INIT_2 0x70000000 51#define MFI_STATE_FW_INIT_2 0x70000000
52#define MFI_STATE_DEVICE_SCAN 0x80000000 52#define MFI_STATE_DEVICE_SCAN 0x80000000
53#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
53#define MFI_STATE_FLUSH_CACHE 0xA0000000 54#define MFI_STATE_FLUSH_CACHE 0xA0000000
54#define MFI_STATE_READY 0xB0000000 55#define MFI_STATE_READY 0xB0000000
55#define MFI_STATE_OPERATIONAL 0xC0000000 56#define MFI_STATE_OPERATIONAL 0xC0000000
@@ -64,12 +65,18 @@
64 * READY : Move from OPERATIONAL to READY state; discard queue info 65 * READY : Move from OPERATIONAL to READY state; discard queue info
65 * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??) 66 * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
66 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver 67 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
68 * HOTPLUG : Resume from Hotplug
69 * MFI_STOP_ADP : Send signal to FW to stop processing
67 */ 70 */
68#define MFI_INIT_ABORT 0x00000000 71#define MFI_INIT_ABORT 0x00000001
69#define MFI_INIT_READY 0x00000002 72#define MFI_INIT_READY 0x00000002
70#define MFI_INIT_MFIMODE 0x00000004 73#define MFI_INIT_MFIMODE 0x00000004
71#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008 74#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
72#define MFI_RESET_FLAGS MFI_INIT_READY|MFI_INIT_MFIMODE 75#define MFI_INIT_HOTPLUG 0x00000010
76#define MFI_STOP_ADP 0x00000020
77#define MFI_RESET_FLAGS MFI_INIT_READY| \
78 MFI_INIT_MFIMODE| \
79 MFI_INIT_ABORT
73 80
74/** 81/**
75 * MFI frame flags 82 * MFI frame flags
@@ -530,6 +537,8 @@ struct megasas_ctrl_info {
530#define MEGASAS_MAX_LUN 8 537#define MEGASAS_MAX_LUN 8
531#define MEGASAS_MAX_LD 64 538#define MEGASAS_MAX_LD 64
532 539
540#define MEGASAS_DBG_LVL 1
541
533/* 542/*
534 * When SCSI mid-layer calls driver's reset routine, driver waits for 543 * When SCSI mid-layer calls driver's reset routine, driver waits for
535 * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note 544 * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
@@ -538,6 +547,7 @@ struct megasas_ctrl_info {
538 * every MEGASAS_RESET_NOTICE_INTERVAL seconds 547 * every MEGASAS_RESET_NOTICE_INTERVAL seconds
539 */ 548 */
540#define MEGASAS_RESET_WAIT_TIME 180 549#define MEGASAS_RESET_WAIT_TIME 180
550#define MEGASAS_INTERNAL_CMD_WAIT_TIME 180
541#define MEGASAS_RESET_NOTICE_INTERVAL 5 551#define MEGASAS_RESET_NOTICE_INTERVAL 5
542 552
543#define MEGASAS_IOCTL_CMD 0 553#define MEGASAS_IOCTL_CMD 0
@@ -1042,6 +1052,7 @@ struct megasas_evt_detail {
1042 void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *); 1052 void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *);
1043 1053
1044 void (*enable_intr)(struct megasas_register_set __iomem *) ; 1054 void (*enable_intr)(struct megasas_register_set __iomem *) ;
1055 void (*disable_intr)(struct megasas_register_set __iomem *);
1045 1056
1046 int (*clear_intr)(struct megasas_register_set __iomem *); 1057 int (*clear_intr)(struct megasas_register_set __iomem *);
1047 1058
@@ -1092,6 +1103,7 @@ struct megasas_instance {
1092 u32 hw_crit_error; 1103 u32 hw_crit_error;
1093 1104
1094 struct megasas_instance_template *instancet; 1105 struct megasas_instance_template *instancet;
1106 struct tasklet_struct isr_tasklet;
1095}; 1107};
1096 1108
1097#define MEGASAS_IS_LOGICAL(scp) \ 1109#define MEGASAS_IS_LOGICAL(scp) \
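The megaraid_sas.h changes above add a disable_intr hook next to enable_intr in the per-controller ops table and a tasklet for deferred command completion. A minimal sketch of that wiring, assuming hypothetical names (my_register_set, my_enable_intr, my_complete_cmds are illustrative, not taken from this patch):

/* Sketch: an ops table whose interrupt enable/disable are function pointers,
 * plus a tasklet that the hard IRQ handler schedules for completion work. */
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct my_register_set { u32 outbound_intr_mask; };

static void my_enable_intr(struct my_register_set __iomem *regs)
{
	writel(0, &regs->outbound_intr_mask);	/* unmask all sources */
	readl(&regs->outbound_intr_mask);	/* flush the posted write */
}

static void my_disable_intr(struct my_register_set __iomem *regs)
{
	writel(~0U, &regs->outbound_intr_mask);	/* mask all sources */
	readl(&regs->outbound_intr_mask);
}

struct my_instance_template {
	void (*enable_intr)(struct my_register_set __iomem *);
	void (*disable_intr)(struct my_register_set __iomem *);
};

static struct my_instance_template my_template = {
	.enable_intr	= my_enable_intr,
	.disable_intr	= my_disable_intr,
};

/* Bottom half: the hard IRQ handler only acknowledges the hardware and then
 * schedules this tasklet, which walks the completed-command list. */
static void my_complete_cmds(unsigned long data)
{
	/* struct my_instance *inst = (struct my_instance *)data; ... */
}

/* At probe time:
 *	tasklet_init(&inst->isr_tasklet, my_complete_cmds, (unsigned long)inst);
 * and from the interrupt handler:
 *	tasklet_schedule(&inst->isr_tasklet);
 */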
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index bfb4f49e125..1c624ce8189 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -3581,7 +3581,7 @@ static struct pci_driver nsp32_driver = {
3581 */ 3581 */
3582static int __init init_nsp32(void) { 3582static int __init init_nsp32(void) {
3583 nsp32_msg(KERN_INFO, "loading..."); 3583 nsp32_msg(KERN_INFO, "loading...");
3584 return pci_module_init(&nsp32_driver); 3584 return pci_register_driver(&nsp32_driver);
3585} 3585}
3586 3586
3587static void __exit exit_nsp32(void) { 3587static void __exit exit_nsp32(void) {
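The nsp32 change above is the pci_module_init() to pci_register_driver() conversion. A minimal sketch of the resulting registration pattern, with placeholder IDs and callbacks (vendor/device 0x1234/0x5678 and the example_* names are illustrative only):

/* Sketch of the pci_register_driver()/pci_unregister_driver() pairing. */
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	/* Unlike the older pci_module_init(), this returns 0 even when no
	 * device is present; hotplugged devices still bind later. */
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");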
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
index 5addf9fb1e1..a976e8193d1 100644
--- a/drivers/scsi/nsp32.h
+++ b/drivers/scsi/nsp32.h
@@ -619,47 +619,5 @@ typedef struct _nsp32_hw_data {
619#define REQSACK_TIMEOUT_TIME 10000 /* max wait time for REQ/SACK assertion 619#define REQSACK_TIMEOUT_TIME 10000 /* max wait time for REQ/SACK assertion
620 or negation, 10000us == 10ms */ 620 or negation, 10000us == 10ms */
621 621
622/**************************************************************************
623 * Compatibility functions
624 */
625
626/* for Kernel 2.4 */
627#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
628# define scsi_register_host(template) scsi_register_module(MODULE_SCSI_HA, template)
629# define scsi_unregister_host(template) scsi_unregister_module(MODULE_SCSI_HA, template)
630# define scsi_host_put(host) scsi_unregister(host)
631# define pci_name(pci_dev) ((pci_dev)->slot_name)
632
633typedef void irqreturn_t;
634# define IRQ_NONE /* */
635# define IRQ_HANDLED /* */
636# define IRQ_RETVAL(x) /* */
637
638/* This is ad-hoc version of scsi_host_get_next() */
639static inline struct Scsi_Host *scsi_host_get_next(struct Scsi_Host *host)
640{
641 if (host == NULL) {
642 return scsi_hostlist;
643 } else {
644 return host->next;
645 }
646}
647
648/* This is ad-hoc version of scsi_host_hn_get() */
649static inline struct Scsi_Host *scsi_host_hn_get(unsigned short hostno)
650{
651 struct Scsi_Host *host;
652
653 for (host = scsi_host_get_next(NULL); host != NULL;
654 host = scsi_host_get_next(host)) {
655 if (host->host_no == hostno) {
656 break;
657 }
658 }
659
660 return host;
661}
662#endif
663
664#endif /* _NSP32_H */ 622#endif /* _NSP32_H */
665/* end */ 623/* end */
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 4a2fed350d4..824fe080d1d 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4843,8 +4843,7 @@ static int os_scsi_tape_close(struct inode * inode, struct file * filp)
4843static int osst_ioctl(struct inode * inode,struct file * file, 4843static int osst_ioctl(struct inode * inode,struct file * file,
4844 unsigned int cmd_in, unsigned long arg) 4844 unsigned int cmd_in, unsigned long arg)
4845{ 4845{
4846 int i, cmd_nr, cmd_type, retval = 0; 4846 int i, cmd_nr, cmd_type, blk, retval = 0;
4847 unsigned int blk;
4848 struct st_modedef * STm; 4847 struct st_modedef * STm;
4849 struct st_partstat * STps; 4848 struct st_partstat * STps;
4850 struct osst_request * SRpnt = NULL; 4849 struct osst_request * SRpnt = NULL;
@@ -5207,12 +5206,12 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d
5207 priority = GFP_KERNEL; 5206 priority = GFP_KERNEL;
5208 5207
5209 i = sizeof(struct osst_buffer) + (osst_max_sg_segs - 1) * sizeof(struct scatterlist); 5208 i = sizeof(struct osst_buffer) + (osst_max_sg_segs - 1) * sizeof(struct scatterlist);
5210 tb = (struct osst_buffer *)kmalloc(i, priority); 5209 tb = kzalloc(i, priority);
5211 if (!tb) { 5210 if (!tb) {
5212 printk(KERN_NOTICE "osst :I: Can't allocate new tape buffer.\n"); 5211 printk(KERN_NOTICE "osst :I: Can't allocate new tape buffer.\n");
5213 return NULL; 5212 return NULL;
5214 } 5213 }
5215 memset(tb, 0, i); 5214
5216 tb->sg_segs = tb->orig_sg_segs = 0; 5215 tb->sg_segs = tb->orig_sg_segs = 0;
5217 tb->use_sg = max_sg; 5216 tb->use_sg = max_sg;
5218 tb->in_use = 1; 5217 tb->in_use = 1;
@@ -5575,9 +5574,9 @@ static ssize_t osst_version_show(struct device_driver *ddd, char *buf)
5575 5574
5576static DRIVER_ATTR(version, S_IRUGO, osst_version_show, NULL); 5575static DRIVER_ATTR(version, S_IRUGO, osst_version_show, NULL);
5577 5576
5578static void osst_create_driverfs_files(struct device_driver *driverfs) 5577static int osst_create_driverfs_files(struct device_driver *driverfs)
5579{ 5578{
5580 driver_create_file(driverfs, &driver_attr_version); 5579 return driver_create_file(driverfs, &driver_attr_version);
5581} 5580}
5582 5581
5583static void osst_remove_driverfs_files(struct device_driver *driverfs) 5582static void osst_remove_driverfs_files(struct device_driver *driverfs)
@@ -5663,50 +5662,70 @@ CLASS_DEVICE_ATTR(file_count, S_IRUGO, osst_filemark_cnt_show, NULL);
5663 5662
5664static struct class *osst_sysfs_class; 5663static struct class *osst_sysfs_class;
5665 5664
5666static int osst_sysfs_valid = 0; 5665static int osst_sysfs_init(void)
5667
5668static void osst_sysfs_init(void)
5669{ 5666{
5670 osst_sysfs_class = class_create(THIS_MODULE, "onstream_tape"); 5667 osst_sysfs_class = class_create(THIS_MODULE, "onstream_tape");
5671 if ( IS_ERR(osst_sysfs_class) ) 5668 if (IS_ERR(osst_sysfs_class)) {
5672 printk(KERN_WARNING "osst :W: Unable to register sysfs class\n"); 5669 printk(KERN_ERR "osst :W: Unable to register sysfs class\n");
5673 else 5670 return PTR_ERR(osst_sysfs_class);
5674 osst_sysfs_valid = 1; 5671 }
5672
5673 return 0;
5675} 5674}
5676 5675
5677static void osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * STp, char * name) 5676static void osst_sysfs_destroy(dev_t dev)
5678{ 5677{
5679 struct class_device *osst_class_member; 5678 class_device_destroy(osst_sysfs_class, dev);
5679}
5680 5680
5681 if (!osst_sysfs_valid) return; 5681static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * STp, char * name)
5682{
5683 struct class_device *osst_class_member;
5684 int err;
5682 5685
5683 osst_class_member = class_device_create(osst_sysfs_class, NULL, dev, device, "%s", name); 5686 osst_class_member = class_device_create(osst_sysfs_class, NULL, dev,
5687 device, "%s", name);
5684 if (IS_ERR(osst_class_member)) { 5688 if (IS_ERR(osst_class_member)) {
5685 printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name); 5689 printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
5686 return; 5690 return PTR_ERR(osst_class_member);
5687 } 5691 }
5692
5688 class_set_devdata(osst_class_member, STp); 5693 class_set_devdata(osst_class_member, STp);
5689 class_device_create_file(osst_class_member, &class_device_attr_ADR_rev); 5694 err = class_device_create_file(osst_class_member,
5690 class_device_create_file(osst_class_member, &class_device_attr_media_version); 5695 &class_device_attr_ADR_rev);
5691 class_device_create_file(osst_class_member, &class_device_attr_capacity); 5696 if (err)
5692 class_device_create_file(osst_class_member, &class_device_attr_BOT_frame); 5697 goto err_out;
5693 class_device_create_file(osst_class_member, &class_device_attr_EOD_frame); 5698 err = class_device_create_file(osst_class_member,
5694 class_device_create_file(osst_class_member, &class_device_attr_file_count); 5699 &class_device_attr_media_version);
5695} 5700 if (err)
5701 goto err_out;
5702 err = class_device_create_file(osst_class_member,
5703 &class_device_attr_capacity);
5704 if (err)
5705 goto err_out;
5706 err = class_device_create_file(osst_class_member,
5707 &class_device_attr_BOT_frame);
5708 if (err)
5709 goto err_out;
5710 err = class_device_create_file(osst_class_member,
5711 &class_device_attr_EOD_frame);
5712 if (err)
5713 goto err_out;
5714 err = class_device_create_file(osst_class_member,
5715 &class_device_attr_file_count);
5716 if (err)
5717 goto err_out;
5696 5718
5697static void osst_sysfs_destroy(dev_t dev) 5719 return 0;
5698{
5699 if (!osst_sysfs_valid) return;
5700 5720
5701 class_device_destroy(osst_sysfs_class, dev); 5721err_out:
5722 osst_sysfs_destroy(dev);
5723 return err;
5702} 5724}
5703 5725
5704static void osst_sysfs_cleanup(void) 5726static void osst_sysfs_cleanup(void)
5705{ 5727{
5706 if (osst_sysfs_valid) { 5728 class_destroy(osst_sysfs_class);
5707 class_destroy(osst_sysfs_class);
5708 osst_sysfs_valid = 0;
5709 }
5710} 5729}
5711 5730
5712/* 5731/*
@@ -5721,7 +5740,7 @@ static int osst_probe(struct device *dev)
5721 struct st_partstat * STps; 5740 struct st_partstat * STps;
5722 struct osst_buffer * buffer; 5741 struct osst_buffer * buffer;
5723 struct gendisk * drive; 5742 struct gendisk * drive;
5724 int i, dev_num; 5743 int i, dev_num, err = -ENODEV;
5725 5744
5726 if (SDp->type != TYPE_TAPE || !osst_supports(SDp)) 5745 if (SDp->type != TYPE_TAPE || !osst_supports(SDp))
5727 return -ENODEV; 5746 return -ENODEV;
@@ -5849,13 +5868,20 @@ static int osst_probe(struct device *dev)
5849 init_MUTEX(&tpnt->lock); 5868 init_MUTEX(&tpnt->lock);
5850 osst_nr_dev++; 5869 osst_nr_dev++;
5851 write_unlock(&os_scsi_tapes_lock); 5870 write_unlock(&os_scsi_tapes_lock);
5871
5852 { 5872 {
5853 char name[8]; 5873 char name[8];
5874
5854 /* Rewind entry */ 5875 /* Rewind entry */
5855 osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num), dev, tpnt, tape_name(tpnt)); 5876 err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num), dev, tpnt, tape_name(tpnt));
5877 if (err)
5878 goto out_free_buffer;
5879
5856 /* No-rewind entry */ 5880 /* No-rewind entry */
5857 snprintf(name, 8, "%s%s", "n", tape_name(tpnt)); 5881 snprintf(name, 8, "%s%s", "n", tape_name(tpnt));
5858 osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num + 128), dev, tpnt, name); 5882 err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num + 128), dev, tpnt, name);
5883 if (err)
5884 goto out_free_sysfs1;
5859 } 5885 }
5860 5886
5861 sdev_printk(KERN_INFO, SDp, 5887 sdev_printk(KERN_INFO, SDp,
@@ -5864,9 +5890,13 @@ static int osst_probe(struct device *dev)
5864 5890
5865 return 0; 5891 return 0;
5866 5892
5893out_free_sysfs1:
5894 osst_sysfs_destroy(MKDEV(OSST_MAJOR, dev_num));
5895out_free_buffer:
5896 kfree(buffer);
5867out_put_disk: 5897out_put_disk:
5868 put_disk(drive); 5898 put_disk(drive);
5869 return -ENODEV; 5899 return err;
5870}; 5900};
5871 5901
5872static int osst_remove(struct device *dev) 5902static int osst_remove(struct device *dev)
@@ -5903,19 +5933,39 @@ static int osst_remove(struct device *dev)
5903 5933
5904static int __init init_osst(void) 5934static int __init init_osst(void)
5905{ 5935{
5936 int err;
5937
5906 printk(KERN_INFO "osst :I: Tape driver with OnStream support version %s\nosst :I: %s\n", osst_version, cvsid); 5938 printk(KERN_INFO "osst :I: Tape driver with OnStream support version %s\nosst :I: %s\n", osst_version, cvsid);
5907 5939
5908 validate_options(); 5940 validate_options();
5909 osst_sysfs_init();
5910 5941
5911 if ((register_chrdev(OSST_MAJOR,"osst", &osst_fops) < 0) || scsi_register_driver(&osst_template.gendrv)) { 5942 err = osst_sysfs_init();
5943 if (err)
5944 return err;
5945
5946 err = register_chrdev(OSST_MAJOR, "osst", &osst_fops);
5947 if (err < 0) {
5912 printk(KERN_ERR "osst :E: Unable to register major %d for OnStream tapes\n", OSST_MAJOR); 5948 printk(KERN_ERR "osst :E: Unable to register major %d for OnStream tapes\n", OSST_MAJOR);
5913 osst_sysfs_cleanup(); 5949 goto err_out;
5914 return 1;
5915 } 5950 }
5916 osst_create_driverfs_files(&osst_template.gendrv); 5951
5952 err = scsi_register_driver(&osst_template.gendrv);
5953 if (err)
5954 goto err_out_chrdev;
5955
5956 err = osst_create_driverfs_files(&osst_template.gendrv);
5957 if (err)
5958 goto err_out_scsidrv;
5917 5959
5918 return 0; 5960 return 0;
5961
5962err_out_scsidrv:
5963 scsi_unregister_driver(&osst_template.gendrv);
5964err_out_chrdev:
5965 unregister_chrdev(OSST_MAJOR, "osst");
5966err_out:
5967 osst_sysfs_cleanup();
5968 return err;
5919} 5969}
5920 5970
5921static void __exit exit_osst (void) 5971static void __exit exit_osst (void)
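init_osst() above now unwinds partial setup with goto labels instead of returning 1 on the first failure. A standalone sketch of the same rollback pattern, with stand-in step functions in place of the sysfs/chrdev/driver registration calls:

/* Sketch: each setup step gets a matching teardown, run in reverse order
 * on failure; the step_*/undo_* helpers are stand-ins. */
#include <stdio.h>

static int step_sysfs(void)   { return 0; }	/* 0 = success */
static int step_chrdev(void)  { return 0; }
static int step_driver(void)  { return -1; }	/* force a failure */

static void undo_sysfs(void)  { puts("undo sysfs"); }
static void undo_chrdev(void) { puts("undo chrdev"); }

static int module_init_example(void)
{
	int err;

	err = step_sysfs();
	if (err)
		return err;

	err = step_chrdev();
	if (err)
		goto err_sysfs;

	err = step_driver();
	if (err)
		goto err_chrdev;

	return 0;

err_chrdev:
	undo_chrdev();
err_sysfs:
	undo_sysfs();
	return err;
}

int main(void)
{
	printf("init returned %d\n", module_init_example());
	return 0;
}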
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 0d4c04e1f3d..053303d3611 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -80,7 +80,6 @@ static int free_ports = 0;
80module_param(free_ports, bool, 0); 80module_param(free_ports, bool, 0);
81MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))"); 81MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
82 82
83/* /usr/src/linux/drivers/scsi/hosts.h */
84static struct scsi_host_template nsp_driver_template = { 83static struct scsi_host_template nsp_driver_template = {
85 .proc_name = "nsp_cs", 84 .proc_name = "nsp_cs",
86 .proc_info = nsp_proc_info, 85 .proc_info = nsp_proc_info,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 332151e2a01..9f33e5946c0 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2862,7 +2862,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2862 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2862 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2863 2863
2864 /* Set ISP command timeout. */ 2864 /* Set ISP command timeout. */
2865 pkt->timeout = cpu_to_le16(30); 2865 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
2866 2866
2867 /* Set device target ID and LUN */ 2867 /* Set device target ID and LUN */
2868 pkt->lun = SCSI_LUN_32(cmd); 2868 pkt->lun = SCSI_LUN_32(cmd);
@@ -3161,7 +3161,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3161 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3161 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3162 3162
3163 /* Set ISP command timeout. */ 3163 /* Set ISP command timeout. */
3164 pkt->timeout = cpu_to_le16(30); 3164 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
3165 3165
3166 /* Set device target ID and LUN */ 3166 /* Set device target ID and LUN */
3167 pkt->lun = SCSI_LUN_32(cmd); 3167 pkt->lun = SCSI_LUN_32(cmd);
@@ -4484,7 +4484,7 @@ qla1280_init(void)
4484 qla1280_setup(qla1280); 4484 qla1280_setup(qla1280);
4485#endif 4485#endif
4486 4486
4487 return pci_module_init(&qla1280_pci_driver); 4487 return pci_register_driver(&qla1280_pci_driver);
4488} 4488}
4489 4489
4490static void __exit 4490static void __exit
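The qla1280 hunks above replace the hard-coded 30-second IOCB timeout with the mid-layer's per-command timeout, which is kept in jiffies and must be converted to seconds for the firmware. A small standalone sketch of that conversion; the HZ value and the defensive floor are illustrative and not part of the patch:

#include <stdio.h>

#define HZ 250	/* illustrative tick rate; the kernel provides HZ */

static unsigned short iocb_timeout_secs(unsigned long timeout_per_command)
{
	unsigned long secs = timeout_per_command / HZ;

	/* Defensive floor (not in the original patch): never hand the
	 * firmware a zero-second timeout. */
	return secs ? (unsigned short)secs : 1;
}

int main(void)
{
	/* a 60-second mid-layer timeout, expressed in jiffies */
	printf("%u seconds\n", iocb_timeout_secs(60UL * HZ));
	return 0;
}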
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 87f90c4f08e..ee75a71f3c6 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -691,13 +691,13 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
691 uint32_t speed = 0; 691 uint32_t speed = 0;
692 692
693 switch (ha->link_data_rate) { 693 switch (ha->link_data_rate) {
694 case LDR_1GB: 694 case PORT_SPEED_1GB:
695 speed = 1; 695 speed = 1;
696 break; 696 break;
697 case LDR_2GB: 697 case PORT_SPEED_2GB:
698 speed = 2; 698 speed = 2;
699 break; 699 break;
700 case LDR_4GB: 700 case PORT_SPEED_4GB:
701 speed = 4; 701 speed = 4;
702 break; 702 break;
703 } 703 }
@@ -849,6 +849,49 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
849 return pfc_host_stat; 849 return pfc_host_stat;
850} 850}
851 851
852static void
853qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
854{
855 scsi_qla_host_t *ha = to_qla_host(shost);
856
857 qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost));
858}
859
860static void
861qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
862{
863 scsi_qla_host_t *ha = to_qla_host(shost);
864
865 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
866}
867
868static void
869qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
870{
871 scsi_qla_host_t *ha = to_qla_host(shost);
872 u64 node_name;
873
874 if (ha->device_flags & SWITCH_FOUND)
875 node_name = wwn_to_u64(ha->fabric_node_name);
876 else
877 node_name = wwn_to_u64(ha->node_name);
878
879 fc_host_fabric_name(shost) = node_name;
880}
881
882static void
883qla2x00_get_host_port_state(struct Scsi_Host *shost)
884{
885 scsi_qla_host_t *ha = to_qla_host(shost);
886
887 if (!ha->flags.online)
888 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
889 else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT)
890 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
891 else
892 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
893}
894
852struct fc_function_template qla2xxx_transport_functions = { 895struct fc_function_template qla2xxx_transport_functions = {
853 896
854 .show_host_node_name = 1, 897 .show_host_node_name = 1,
@@ -861,6 +904,14 @@ struct fc_function_template qla2xxx_transport_functions = {
861 .show_host_speed = 1, 904 .show_host_speed = 1,
862 .get_host_port_type = qla2x00_get_host_port_type, 905 .get_host_port_type = qla2x00_get_host_port_type,
863 .show_host_port_type = 1, 906 .show_host_port_type = 1,
907 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
908 .show_host_symbolic_name = 1,
909 .set_host_system_hostname = qla2x00_set_host_system_hostname,
910 .show_host_system_hostname = 1,
911 .get_host_fabric_name = qla2x00_get_host_fabric_name,
912 .show_host_fabric_name = 1,
913 .get_host_port_state = qla2x00_get_host_port_state,
914 .show_host_port_state = 1,
864 915
865 .dd_fcrport_size = sizeof(struct fc_port *), 916 .dd_fcrport_size = sizeof(struct fc_port *),
866 .show_rport_supported_classes = 1, 917 .show_rport_supported_classes = 1,
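The qla_attr.c additions follow the FC transport attribute pattern: a "get" callback fills the matching fc_host_*() field and a show_host_* flag exposes it through sysfs. A minimal sketch of that pattern; my_get_host_port_state() and the hard-coded online state are illustrative, not the driver's logic:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static void my_get_host_port_state(struct Scsi_Host *shost)
{
	/* Derive the state from adapter soft state; simplified here. */
	fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
}

static struct fc_function_template my_transport_functions = {
	.get_host_port_state	= my_get_host_port_state,
	.show_host_port_state	= 1,
	/* ... node/port names, speed, symbolic name, fabric name, ... */
};

/* At module init the template becomes a transport class:
 *	template = fc_attach_transport(&my_transport_functions);
 * and each Scsi_Host sets shost->transportt before scsi_add_host(). */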
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 0930260aec2..c37a30aa214 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -608,6 +608,7 @@ typedef struct {
608 */ 608 */
609#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ 609#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
610#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */ 610#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
611#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
611#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */ 612#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */
612#define MBC_TRACE_CONTROL 0x27 /* Trace control command. */ 613#define MBC_TRACE_CONTROL 0x27 /* Trace control command. */
613#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */ 614#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */
@@ -1497,6 +1498,9 @@ typedef struct {
1497 port_id_t d_id; 1498 port_id_t d_id;
1498 uint8_t node_name[WWN_SIZE]; 1499 uint8_t node_name[WWN_SIZE];
1499 uint8_t port_name[WWN_SIZE]; 1500 uint8_t port_name[WWN_SIZE];
1501 uint8_t fabric_port_name[WWN_SIZE];
1502 uint16_t fp_speeds;
1503 uint16_t fp_speed;
1500} sw_info_t; 1504} sw_info_t;
1501 1505
1502/* 1506/*
@@ -1524,6 +1528,9 @@ typedef struct fc_port {
1524 uint16_t loop_id; 1528 uint16_t loop_id;
1525 uint16_t old_loop_id; 1529 uint16_t old_loop_id;
1526 1530
1531 uint8_t fabric_port_name[WWN_SIZE];
1532 uint16_t fp_speed;
1533
1527 fc_port_type_t port_type; 1534 fc_port_type_t port_type;
1528 1535
1529 atomic_t state; 1536 atomic_t state;
@@ -1635,6 +1642,15 @@ typedef struct fc_port {
1635#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255) 1642#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
1636#define RSNN_NN_RSP_SIZE 16 1643#define RSNN_NN_RSP_SIZE 16
1637 1644
1645#define GFPN_ID_CMD 0x11C
1646#define GFPN_ID_REQ_SIZE (16 + 4)
1647#define GFPN_ID_RSP_SIZE (16 + 8)
1648
1649#define GPSC_CMD 0x127
1650#define GPSC_REQ_SIZE (16 + 8)
1651#define GPSC_RSP_SIZE (16 + 2 + 2)
1652
1653
1638/* 1654/*
1639 * HBA attribute types. 1655 * HBA attribute types.
1640 */ 1656 */
@@ -1748,7 +1764,7 @@ struct ct_sns_req {
1748 uint8_t reserved[3]; 1764 uint8_t reserved[3];
1749 1765
1750 union { 1766 union {
1751 /* GA_NXT, GPN_ID, GNN_ID, GFT_ID */ 1767 /* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */
1752 struct { 1768 struct {
1753 uint8_t reserved; 1769 uint8_t reserved;
1754 uint8_t port_id[3]; 1770 uint8_t port_id[3];
@@ -1823,6 +1839,10 @@ struct ct_sns_req {
1823 struct { 1839 struct {
1824 uint8_t port_name[8]; 1840 uint8_t port_name[8];
1825 } dpa; 1841 } dpa;
1842
1843 struct {
1844 uint8_t port_name[8];
1845 } gpsc;
1826 } req; 1846 } req;
1827}; 1847};
1828 1848
@@ -1886,6 +1906,15 @@ struct ct_sns_rsp {
1886 uint8_t port_name[8]; 1906 uint8_t port_name[8];
1887 struct ct_fdmi_hba_attributes attrs; 1907 struct ct_fdmi_hba_attributes attrs;
1888 } ghat; 1908 } ghat;
1909
1910 struct {
1911 uint8_t port_name[8];
1912 } gfpn_id;
1913
1914 struct {
1915 uint16_t speeds;
1916 uint16_t speed;
1917 } gpsc;
1889 } rsp; 1918 } rsp;
1890}; 1919};
1891 1920
@@ -2182,11 +2211,11 @@ typedef struct scsi_qla_host {
2182 uint16_t max_public_loop_ids; 2211 uint16_t max_public_loop_ids;
2183 uint16_t min_external_loopid; /* First external loop Id */ 2212 uint16_t min_external_loopid; /* First external loop Id */
2184 2213
2214#define PORT_SPEED_UNKNOWN 0xFFFF
2215#define PORT_SPEED_1GB 0x00
2216#define PORT_SPEED_2GB 0x01
2217#define PORT_SPEED_4GB 0x03
2185 uint16_t link_data_rate; /* F/W operating speed */ 2218 uint16_t link_data_rate; /* F/W operating speed */
2186#define LDR_1GB 0
2187#define LDR_2GB 1
2188#define LDR_4GB 3
2189#define LDR_UNKNOWN 0xFFFF
2190 2219
2191 uint8_t current_topology; 2220 uint8_t current_topology;
2192 uint8_t prev_topology; 2221 uint8_t prev_topology;
@@ -2333,6 +2362,7 @@ typedef struct scsi_qla_host {
2333 2362
2334 uint8_t *node_name; 2363 uint8_t *node_name;
2335 uint8_t *port_name; 2364 uint8_t *port_name;
2365 uint8_t fabric_node_name[WWN_SIZE];
2336 uint32_t isp_abort_cnt; 2366 uint32_t isp_abort_cnt;
2337 2367
2338 /* Option ROM information. */ 2368 /* Option ROM information. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8311ac2b93a..bef7011378c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -208,6 +208,12 @@ qla2x00_trace_control(scsi_qla_host_t *, uint16_t, dma_addr_t, uint16_t);
208extern int 208extern int
209qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t); 209qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
210 210
211extern int
212qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t *, uint16_t *);
213
214extern int
215qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
216
211/* 217/*
212 * Global Function Prototypes in qla_isr.c source file. 218 * Global Function Prototypes in qla_isr.c source file.
213 */ 219 */
@@ -279,6 +285,9 @@ extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
279extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); 285extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
280extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); 286extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
281extern int qla2x00_fdmi_register(scsi_qla_host_t *); 287extern int qla2x00_fdmi_register(scsi_qla_host_t *);
288extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
289extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
290extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
282 291
283/* 292/*
284 * Global Function Prototypes in qla_attr.c source file. 293 * Global Function Prototypes in qla_attr.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 2ebf259fccb..97fbc62ec66 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -612,6 +612,14 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
612 return (rval); 612 return (rval);
613} 613}
614 614
615void
616qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
617{
618 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
619 ha->fw_major_version, ha->fw_minor_version,
620 ha->fw_subminor_version, qla2x00_version_str);
621}
622
615/** 623/**
616 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. 624 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
617 * @ha: HA context 625 * @ha: HA context
@@ -622,9 +630,6 @@ int
622qla2x00_rsnn_nn(scsi_qla_host_t *ha) 630qla2x00_rsnn_nn(scsi_qla_host_t *ha)
623{ 631{
624 int rval; 632 int rval;
625 uint8_t *snn;
626 uint8_t version[20];
627
628 ms_iocb_entry_t *ms_pkt; 633 ms_iocb_entry_t *ms_pkt;
629 struct ct_sns_req *ct_req; 634 struct ct_sns_req *ct_req;
630 struct ct_sns_rsp *ct_rsp; 635 struct ct_sns_rsp *ct_rsp;
@@ -649,20 +654,11 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
649 memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE); 654 memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE);
650 655
651 /* Prepare the Symbolic Node Name */ 656 /* Prepare the Symbolic Node Name */
652 /* Board type */ 657 qla2x00_get_sym_node_name(ha, ct_req->req.rsnn_nn.sym_node_name);
653 snn = ct_req->req.rsnn_nn.sym_node_name;
654 strcpy(snn, ha->model_number);
655 /* Firmware version */
656 strcat(snn, " FW:v");
657 sprintf(version, "%d.%02d.%02d", ha->fw_major_version,
658 ha->fw_minor_version, ha->fw_subminor_version);
659 strcat(snn, version);
660 /* Driver version */
661 strcat(snn, " DVR:v");
662 strcat(snn, qla2x00_version_str);
663 658
664 /* Calculate SNN length */ 659 /* Calculate SNN length */
665 ct_req->req.rsnn_nn.name_len = (uint8_t)strlen(snn); 660 ct_req->req.rsnn_nn.name_len =
661 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
666 662
667 /* Update MS IOCB request */ 663 /* Update MS IOCB request */
668 ms_pkt->req_bytecount = 664 ms_pkt->req_bytecount =
@@ -687,7 +683,6 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
687 return (rval); 683 return (rval);
688} 684}
689 685
690
691/** 686/**
692 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query. 687 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
693 * @ha: HA context 688 * @ha: HA context
@@ -1585,6 +1580,21 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1585 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no, 1580 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
1586 eiter->a.os_dev_name)); 1581 eiter->a.os_dev_name));
1587 1582
1583 /* Hostname. */
1584 if (strlen(fc_host_system_hostname(ha->host))) {
1585 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1586 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
1587 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1588 "%s", fc_host_system_hostname(ha->host));
1589 alen = strlen(eiter->a.host_name);
1590 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1591 eiter->len = cpu_to_be16(4 + alen);
1592 size += 4 + alen;
1593
1594 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
1595 ha->host_no, eiter->a.host_name));
1596 }
1597
1588 /* Update MS request size. */ 1598 /* Update MS request size. */
1589 qla2x00_update_ms_fdmi_iocb(ha, size + 16); 1599 qla2x00_update_ms_fdmi_iocb(ha, size + 16);
1590 1600
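The hostname attribute added above uses the FDMI padding rule seen in "alen += (alen & 3) ? (4 - (alen & 3)) : 4;": the payload is rounded up to a 4-byte boundary, and a full 4 bytes are added when it is already aligned, so at least one pad byte always follows the string. A standalone illustration:

#include <stdio.h>

static unsigned int fdmi_pad_len(unsigned int alen)
{
	/* Round up to a multiple of 4, always adding at least one byte. */
	return alen + ((alen & 3) ? (4 - (alen & 3)) : 4);
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 9; n++)
		printf("strlen %u -> padded %u, attribute len %u\n",
		       n, fdmi_pad_len(n), 4 + fdmi_pad_len(n));
	return 0;
}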
@@ -1647,3 +1657,189 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
1647 1657
1648 return rval; 1658 return rval;
1649} 1659}
1660
1661/**
1662 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
1663 * @ha: HA context
1664 * @list: switch info entries to populate
1665 *
1666 * Returns 0 on success.
1667 */
1668int
1669qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1670{
1671 int rval;
1672 uint16_t i;
1673
1674 ms_iocb_entry_t *ms_pkt;
1675 struct ct_sns_req *ct_req;
1676 struct ct_sns_rsp *ct_rsp;
1677
1678 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
1679 return QLA_FUNCTION_FAILED;
1680
1681 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1682 /* Issue GFPN_ID */
1683 memset(list[i].fabric_port_name, 0, WWN_SIZE);
1684
1685 /* Prepare common MS IOCB */
1686 ms_pkt = qla2x00_prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
1687 GFPN_ID_RSP_SIZE);
1688
1689 /* Prepare CT request */
1690 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GFPN_ID_CMD,
1691 GFPN_ID_RSP_SIZE);
1692 ct_rsp = &ha->ct_sns->p.rsp;
1693
1694 /* Prepare CT arguments -- port_id */
1695 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
1696 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
1697 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
1698
1699 /* Execute MS IOCB */
1700 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1701 sizeof(ms_iocb_entry_t));
1702 if (rval != QLA_SUCCESS) {
1703 /*EMPTY*/
1704 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
1705 "failed (%d).\n", ha->host_no, rval));
1706 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
1707 "GFPN_ID") != QLA_SUCCESS) {
1708 rval = QLA_FUNCTION_FAILED;
1709 } else {
1710 /* Save fabric portname */
1711 memcpy(list[i].fabric_port_name,
1712 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
1713 }
1714
1715 /* Last device exit. */
1716 if (list[i].d_id.b.rsvd_1 != 0)
1717 break;
1718 }
1719
1720 return (rval);
1721}
1722
1723static inline void *
1724qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1725 uint32_t rsp_size)
1726{
1727 struct ct_entry_24xx *ct_pkt;
1728
1729 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1730 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1731
1732 ct_pkt->entry_type = CT_IOCB_TYPE;
1733 ct_pkt->entry_count = 1;
1734 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
1735 ct_pkt->timeout = __constant_cpu_to_le16(59);
1736 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1737 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
1738 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1739 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1740
1741 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1742 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1743 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1744
1745 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1746 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1747 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1748
1749 return ct_pkt;
1750}
1751
1752
1753static inline struct ct_sns_req *
1754qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd,
1755 uint16_t rsp_size)
1756{
1757 memset(ct_req, 0, sizeof(struct ct_sns_pkt));
1758
1759 ct_req->header.revision = 0x01;
1760 ct_req->header.gs_type = 0xFA;
1761 ct_req->header.gs_subtype = 0x01;
1762 ct_req->command = cpu_to_be16(cmd);
1763 ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1764
1765 return ct_req;
1766}
1767
1768/**
1769 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
1770 * @ha: HA context
1771 * @list: switch info entries to populate
1772 *
1773 * Returns 0 on success.
1774 */
1775int
1776qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1777{
1778 int rval;
1779 uint16_t i;
1780
1781 ms_iocb_entry_t *ms_pkt;
1782 struct ct_sns_req *ct_req;
1783 struct ct_sns_rsp *ct_rsp;
1784
1785 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
1786 return QLA_FUNCTION_FAILED;
1787
1788 rval = qla2x00_mgmt_svr_login(ha);
1789 if (rval)
1790 return rval;
1791
1792 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1793		/* Issue GPSC */
1794 list[i].fp_speeds = list[i].fp_speed = 0;
1795
1796 /* Prepare common MS IOCB */
1797 ms_pkt = qla24xx_prep_ms_fm_iocb(ha, GPSC_REQ_SIZE,
1798 GPSC_RSP_SIZE);
1799
1800 /* Prepare CT request */
1801 ct_req = qla24xx_prep_ct_fm_req(&ha->ct_sns->p.req,
1802 GPSC_CMD, GPSC_RSP_SIZE);
1803 ct_rsp = &ha->ct_sns->p.rsp;
1804
1805 /* Prepare CT arguments -- port_name */
1806 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
1807 WWN_SIZE);
1808
1809 /* Execute MS IOCB */
1810 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1811 sizeof(ms_iocb_entry_t));
1812 if (rval != QLA_SUCCESS) {
1813 /*EMPTY*/
1814 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
1815 "failed (%d).\n", ha->host_no, rval));
1816 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
1817 "GPSC") != QLA_SUCCESS) {
1818 rval = QLA_FUNCTION_FAILED;
1819 } else {
1820			/* Save port speeds */
1821 list[i].fp_speeds = ct_rsp->rsp.gpsc.speeds;
1822 list[i].fp_speed = ct_rsp->rsp.gpsc.speed;
1823
1824 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
1825 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1826 "speed=%04x.\n", ha->host_no,
1827 list[i].fabric_port_name[0],
1828 list[i].fabric_port_name[1],
1829 list[i].fabric_port_name[2],
1830 list[i].fabric_port_name[3],
1831 list[i].fabric_port_name[4],
1832 list[i].fabric_port_name[5],
1833 list[i].fabric_port_name[6],
1834 list[i].fabric_port_name[7],
1835 be16_to_cpu(list[i].fp_speeds),
1836 be16_to_cpu(list[i].fp_speed)));
1837 }
1838
1839 /* Last device exit. */
1840 if (list[i].d_id.b.rsvd_1 != 0)
1841 break;
1842 }
1843
1844 return (rval);
1845}
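qla2x00_get_sym_node_name(), introduced earlier in this file, centralizes the "<model> FW:vM.mm.ss DVR:v<version>" string that RSNN_NN and the transport symbolic-name attribute both consume. A standalone sketch of that format; the model string and buffer size are illustrative (the driver writes into a fixed CT request field with sprintf()):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char snn[64];
	const char *model = "QLE2462";		/* example model string */
	int fw_major = 4, fw_minor = 0, fw_sub = 27;
	const char *drv_ver = "8.01.07-k2";

	snprintf(snn, sizeof(snn), "%s FW:v%d.%02d.%02d DVR:v%s",
		 model, fw_major, fw_minor, fw_sub, drv_ver);
	/* RSNN_NN then sets name_len = strlen(sym_node_name). */
	printf("%s (name_len %u)\n", snn, (unsigned)strlen(snn));
	return 0;
}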
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 859649160ca..d5d26273c04 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2074,6 +2074,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2074 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2074 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2075 } 2075 }
2076 2076
2077 /* Base iIDMA settings on HBA port speed. */
2078 switch (ha->link_data_rate) {
2079 case PORT_SPEED_1GB:
2080 fcport->fp_speed = cpu_to_be16(BIT_15);
2081 break;
2082 case PORT_SPEED_2GB:
2083 fcport->fp_speed = cpu_to_be16(BIT_14);
2084 break;
2085 case PORT_SPEED_4GB:
2086 fcport->fp_speed = cpu_to_be16(BIT_13);
2087 break;
2088 }
2089
2077 qla2x00_update_fcport(ha, fcport); 2090 qla2x00_update_fcport(ha, fcport);
2078 2091
2079 found_devs++; 2092 found_devs++;
@@ -2109,6 +2122,62 @@ qla2x00_probe_for_all_luns(scsi_qla_host_t *ha)
2109 } 2122 }
2110} 2123}
2111 2124
2125static void
2126qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2127{
2128#define LS_UNKNOWN 2
2129 static char *link_speeds[5] = { "1", "2", "?", "4" };
2130 int rval;
2131 uint16_t port_speed, mb[6];
2132
2133 if (!IS_QLA24XX(ha))
2134 return;
2135
2136 switch (be16_to_cpu(fcport->fp_speed)) {
2137 case BIT_15:
2138 port_speed = PORT_SPEED_1GB;
2139 break;
2140 case BIT_14:
2141 port_speed = PORT_SPEED_2GB;
2142 break;
2143 case BIT_13:
2144 port_speed = PORT_SPEED_4GB;
2145 break;
2146 default:
2147 DEBUG2(printk("scsi(%ld): %02x%02x%02x%02x%02x%02x%02x%02x -- "
2148 "unsupported FM port operating speed (%04x).\n",
2149 ha->host_no, fcport->port_name[0], fcport->port_name[1],
2150 fcport->port_name[2], fcport->port_name[3],
2151 fcport->port_name[4], fcport->port_name[5],
2152 fcport->port_name[6], fcport->port_name[7],
2153 be16_to_cpu(fcport->fp_speed)));
2154 port_speed = PORT_SPEED_UNKNOWN;
2155 break;
2156 }
2157 if (port_speed == PORT_SPEED_UNKNOWN)
2158 return;
2159
2160 rval = qla2x00_set_idma_speed(ha, fcport->loop_id, port_speed, mb);
2161 if (rval != QLA_SUCCESS) {
2162 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2163 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2164 ha->host_no, fcport->port_name[0], fcport->port_name[1],
2165 fcport->port_name[2], fcport->port_name[3],
2166 fcport->port_name[4], fcport->port_name[5],
2167 fcport->port_name[6], fcport->port_name[7], rval,
2168 port_speed, mb[0], mb[1]));
2169 } else {
2170 DEBUG2(qla_printk(KERN_INFO, ha,
2171 "iIDMA adjusted to %s GB/s on "
2172 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2173 link_speeds[port_speed], fcport->port_name[0],
2174 fcport->port_name[1], fcport->port_name[2],
2175 fcport->port_name[3], fcport->port_name[4],
2176 fcport->port_name[5], fcport->port_name[6],
2177 fcport->port_name[7]));
2178 }
2179}
2180
2112/* 2181/*
2113 * qla2x00_update_fcport 2182 * qla2x00_update_fcport
2114 * Updates device on list. 2183 * Updates device on list.
@@ -2135,6 +2204,8 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2135 PORT_RETRY_TIME); 2204 PORT_RETRY_TIME);
2136 fcport->flags &= ~FCF_LOGIN_NEEDED; 2205 fcport->flags &= ~FCF_LOGIN_NEEDED;
2137 2206
2207 qla2x00_iidma_fcport(ha, fcport);
2208
2138 atomic_set(&fcport->state, FCS_ONLINE); 2209 atomic_set(&fcport->state, FCS_ONLINE);
2139 2210
2140 if (ha->flags.init_done) 2211 if (ha->flags.init_done)
@@ -2209,7 +2280,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2209 loop_id = NPH_F_PORT; 2280 loop_id = NPH_F_PORT;
2210 else 2281 else
2211 loop_id = SNS_FL_PORT; 2282 loop_id = SNS_FL_PORT;
2212 rval = qla2x00_get_port_name(ha, loop_id, NULL, 0); 2283 rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1);
2213 if (rval != QLA_SUCCESS) { 2284 if (rval != QLA_SUCCESS) {
2214 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2285 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2215 "Port\n", ha->host_no)); 2286 "Port\n", ha->host_no));
@@ -2217,6 +2288,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2217 ha->device_flags &= ~SWITCH_FOUND; 2288 ha->device_flags &= ~SWITCH_FOUND;
2218 return (QLA_SUCCESS); 2289 return (QLA_SUCCESS);
2219 } 2290 }
2291 ha->device_flags |= SWITCH_FOUND;
2220 2292
2221 /* Mark devices that need re-synchronization. */ 2293 /* Mark devices that need re-synchronization. */
2222 rval2 = qla2x00_device_resync(ha); 2294 rval2 = qla2x00_device_resync(ha);
@@ -2416,6 +2488,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2416 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2488 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
2417 kfree(swl); 2489 kfree(swl);
2418 swl = NULL; 2490 swl = NULL;
2491 } else if (qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
2492 qla2x00_gpsc(ha, swl);
2419 } 2493 }
2420 } 2494 }
2421 swl_idx = 0; 2495 swl_idx = 0;
@@ -2450,6 +2524,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2450 swl[swl_idx].node_name, WWN_SIZE); 2524 swl[swl_idx].node_name, WWN_SIZE);
2451 memcpy(new_fcport->port_name, 2525 memcpy(new_fcport->port_name,
2452 swl[swl_idx].port_name, WWN_SIZE); 2526 swl[swl_idx].port_name, WWN_SIZE);
2527 memcpy(new_fcport->fabric_port_name,
2528 swl[swl_idx].fabric_port_name, WWN_SIZE);
2529 new_fcport->fp_speed = swl[swl_idx].fp_speed;
2453 2530
2454 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 2531 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
2455 last_dev = 1; 2532 last_dev = 1;
@@ -2507,6 +2584,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2507 2584
2508 found++; 2585 found++;
2509 2586
2587 /* Update port state. */
2588 memcpy(fcport->fabric_port_name,
2589 new_fcport->fabric_port_name, WWN_SIZE);
2590 fcport->fp_speed = new_fcport->fp_speed;
2591
2510 /* 2592 /*
2511 * If address the same and state FCS_ONLINE, nothing 2593 * If address the same and state FCS_ONLINE, nothing
2512 * changed. 2594 * changed.
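qla2x00_iidma_fcport() above decodes the GPSC operating speed, reported as a single bit (BIT_15 = 1 Gb, BIT_14 = 2 Gb, BIT_13 = 4 Gb), into the PORT_SPEED_* value handed to the iIDMA mailbox command. A standalone sketch of that mapping; the local SPEED_* names mirror the PORT_SPEED_* defines added to qla_def.h:

#include <stdio.h>
#include <stdint.h>

#define BIT_13 0x2000
#define BIT_14 0x4000
#define BIT_15 0x8000

enum {
	SPEED_1GB = 0x00, SPEED_2GB = 0x01,
	SPEED_4GB = 0x03, SPEED_UNKNOWN = 0xFFFF
};

static unsigned int decode_fp_speed(uint16_t fp_speed)
{
	switch (fp_speed) {
	case BIT_15: return SPEED_1GB;
	case BIT_14: return SPEED_2GB;
	case BIT_13: return SPEED_4GB;
	default:     return SPEED_UNKNOWN;	/* skip the iIDMA adjustment */
	}
}

int main(void)
{
	printf("0x%04x -> 0x%02x\n", BIT_14, decode_fp_speed(BIT_14));
	return 0;
}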
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index de0613135f7..5fa933cda99 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -400,7 +400,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
400 case MBA_LOOP_UP: /* Loop Up Event */ 400 case MBA_LOOP_UP: /* Loop Up Event */
401 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 401 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
402 link_speed = link_speeds[0]; 402 link_speed = link_speeds[0];
403 ha->link_data_rate = LDR_1GB; 403 ha->link_data_rate = PORT_SPEED_1GB;
404 } else { 404 } else {
405 link_speed = link_speeds[LS_UNKNOWN]; 405 link_speed = link_speeds[LS_UNKNOWN];
406 if (mb[1] < 5) 406 if (mb[1] < 5)
@@ -429,7 +429,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
429 } 429 }
430 430
431 ha->flags.management_server_logged_in = 0; 431 ha->flags.management_server_logged_in = 0;
432 ha->link_data_rate = LDR_UNKNOWN; 432 ha->link_data_rate = PORT_SPEED_UNKNOWN;
433 if (ql2xfdmienable) 433 if (ql2xfdmienable)
434 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 434 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
435 break; 435 break;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 879f281e2ea..4cde76c85cb 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2540,3 +2540,89 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2540 2540
2541 return rval; 2541 return rval;
2542} 2542}
2543
2544int
2545qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2546 uint16_t *port_speed, uint16_t *mb)
2547{
2548 int rval;
2549 mbx_cmd_t mc;
2550 mbx_cmd_t *mcp = &mc;
2551
2552 if (!IS_QLA24XX(ha))
2553 return QLA_FUNCTION_FAILED;
2554
2555 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2556
2557 mcp->mb[0] = MBC_PORT_PARAMS;
2558 mcp->mb[1] = loop_id;
2559 mcp->mb[2] = mcp->mb[3] = mcp->mb[4] = mcp->mb[5] = 0;
2560 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2561 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2562 mcp->tov = 30;
2563 mcp->flags = 0;
2564 rval = qla2x00_mailbox_command(ha, mcp);
2565
2566 /* Return mailbox statuses. */
2567 if (mb != NULL) {
2568 mb[0] = mcp->mb[0];
2569 mb[1] = mcp->mb[1];
2570 mb[3] = mcp->mb[3];
2571 mb[4] = mcp->mb[4];
2572 mb[5] = mcp->mb[5];
2573 }
2574
2575 if (rval != QLA_SUCCESS) {
2576 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2577 ha->host_no, rval));
2578 } else {
2579 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2580 if (port_speed)
2581 *port_speed = mcp->mb[3];
2582 }
2583
2584 return rval;
2585}
2586
2587int
2588qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2589 uint16_t port_speed, uint16_t *mb)
2590{
2591 int rval;
2592 mbx_cmd_t mc;
2593 mbx_cmd_t *mcp = &mc;
2594
2595 if (!IS_QLA24XX(ha))
2596 return QLA_FUNCTION_FAILED;
2597
2598 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2599
2600 mcp->mb[0] = MBC_PORT_PARAMS;
2601 mcp->mb[1] = loop_id;
2602 mcp->mb[2] = BIT_0;
2603 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2604 mcp->mb[4] = mcp->mb[5] = 0;
2605 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2606 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2607 mcp->tov = 30;
2608 mcp->flags = 0;
2609 rval = qla2x00_mailbox_command(ha, mcp);
2610
2611 /* Return mailbox statuses. */
2612 if (mb != NULL) {
2613 mb[0] = mcp->mb[0];
2614 mb[1] = mcp->mb[1];
2615 mb[3] = mcp->mb[3];
2616 mb[4] = mcp->mb[4];
2617 mb[5] = mcp->mb[5];
2618 }
2619
2620 if (rval != QLA_SUCCESS) {
2621 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2622 ha->host_no, rval));
2623 } else {
2624 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2625 }
2626
2627 return rval;
2628}
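The new MBC_PORT_PARAMS calls above follow the driver's mailbox convention: mb[0] holds the opcode, mb[1] the loop ID, mb[2] a set/get flag, mb[3] the speed code, and out_mb/in_mb are bit masks naming which registers are written to or read back from firmware. A standalone sketch of that request layout (the MBX_n and opcode values mirror the diff; everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

#define MBX_0 (1u << 0)
#define MBX_1 (1u << 1)
#define MBX_2 (1u << 2)
#define MBX_3 (1u << 3)
#define MBX_4 (1u << 4)
#define MBX_5 (1u << 5)

struct mbx_cmd {
	uint16_t mb[6];
	uint32_t out_mb;	/* registers loaded before the command */
	uint32_t in_mb;		/* registers captured from the reply */
};

static void prep_set_idma_speed(struct mbx_cmd *mc, uint16_t opcode,
				uint16_t loop_id, uint16_t speed)
{
	mc->mb[0] = opcode;		/* e.g. MBC_PORT_PARAMS (0x1A) */
	mc->mb[1] = loop_id;
	mc->mb[2] = 1;			/* BIT_0: "set" rather than "get" */
	mc->mb[3] = speed & 0x7;	/* speed code in the low three bits */
	mc->mb[4] = mc->mb[5] = 0;
	mc->out_mb = MBX_5 | MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
	mc->in_mb  = MBX_5 | MBX_4 | MBX_3 | MBX_1 | MBX_0;
}

int main(void)
{
	struct mbx_cmd mc;

	prep_set_idma_speed(&mc, 0x1A, 5, 0x01);	/* loop ID 5, 2 Gb */
	printf("mb0=%#x mb1=%u mb3=%#x out_mb=%#x\n",
	       mc.mb[0], mc.mb[1], mc.mb[3], mc.out_mb);
	return 0;
}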
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 65cbe2f5eea..3ba8c239f17 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -589,6 +589,23 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
589 return (return_status); 589 return (return_status);
590} 590}
591 591
592static void
593qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
594{
595 struct Scsi_Host *shost = cmnd->device->host;
596 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
597 unsigned long flags;
598
599 spin_lock_irqsave(shost->host_lock, flags);
600 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
601 spin_unlock_irqrestore(shost->host_lock, flags);
602 msleep(1000);
603 spin_lock_irqsave(shost->host_lock, flags);
604 }
605 spin_unlock_irqrestore(shost->host_lock, flags);
606 return;
607}
608
592/************************************************************************** 609/**************************************************************************
593* qla2xxx_eh_abort 610* qla2xxx_eh_abort
594* 611*
@@ -615,6 +632,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
615 unsigned long flags; 632 unsigned long flags;
616 int wait = 0; 633 int wait = 0;
617 634
635 qla2x00_block_error_handler(cmd);
636
618 if (!CMD_SP(cmd)) 637 if (!CMD_SP(cmd))
619 return SUCCESS; 638 return SUCCESS;
620 639
@@ -748,6 +767,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
748 unsigned int id, lun; 767 unsigned int id, lun;
749 unsigned long serial; 768 unsigned long serial;
750 769
770 qla2x00_block_error_handler(cmd);
771
751 ret = FAILED; 772 ret = FAILED;
752 773
753 id = cmd->device->id; 774 id = cmd->device->id;
@@ -877,6 +898,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
877 unsigned int id, lun; 898 unsigned int id, lun;
878 unsigned long serial; 899 unsigned long serial;
879 900
901 qla2x00_block_error_handler(cmd);
902
880 ret = FAILED; 903 ret = FAILED;
881 904
882 id = cmd->device->id; 905 id = cmd->device->id;
@@ -936,6 +959,8 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
936 unsigned int id, lun; 959 unsigned int id, lun;
937 unsigned long serial; 960 unsigned long serial;
938 961
962 qla2x00_block_error_handler(cmd);
963
939 ret = FAILED; 964 ret = FAILED;
940 965
941 id = cmd->device->id; 966 id = cmd->device->id;
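Each error handler above now calls qla2x00_block_error_handler(), which polls the rport state under the host lock but drops the lock around every sleep. A standalone sketch of that "stall while blocked" pattern, with a pthread mutex and a flag standing in for the host lock and FC_PORTSTATE_BLOCKED:

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int port_blocked;	/* set/cleared elsewhere under host_lock */

static void block_error_handler(void)
{
	pthread_mutex_lock(&host_lock);
	while (port_blocked) {
		pthread_mutex_unlock(&host_lock);
		sleep(1);			/* msleep(1000) in the driver */
		pthread_mutex_lock(&host_lock);
	}
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	block_error_handler();	/* returns immediately when not blocked */
	return 0;
}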
@@ -1385,7 +1410,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1385 ha->prev_topology = 0; 1410 ha->prev_topology = 0;
1386 ha->init_cb_size = sizeof(init_cb_t); 1411 ha->init_cb_size = sizeof(init_cb_t);
1387 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1412 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
1388 ha->link_data_rate = LDR_UNKNOWN; 1413 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1389 ha->optrom_size = OPTROM_SIZE_2300; 1414 ha->optrom_size = OPTROM_SIZE_2300;
1390 1415
1391 /* Assign ISP specific operations. */ 1416 /* Assign ISP specific operations. */
@@ -2564,14 +2589,20 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
2564#define FW_ISP2322 3 2589#define FW_ISP2322 3
2565#define FW_ISP24XX 4 2590#define FW_ISP24XX 4
2566 2591
2592#define FW_FILE_ISP21XX "ql2100_fw.bin"
2593#define FW_FILE_ISP22XX "ql2200_fw.bin"
2594#define FW_FILE_ISP2300 "ql2300_fw.bin"
2595#define FW_FILE_ISP2322 "ql2322_fw.bin"
2596#define FW_FILE_ISP24XX "ql2400_fw.bin"
2597
2567static DECLARE_MUTEX(qla_fw_lock); 2598static DECLARE_MUTEX(qla_fw_lock);
2568 2599
2569static struct fw_blob qla_fw_blobs[FW_BLOBS] = { 2600static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2570 { .name = "ql2100_fw.bin", .segs = { 0x1000, 0 }, }, 2601 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
2571 { .name = "ql2200_fw.bin", .segs = { 0x1000, 0 }, }, 2602 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
2572 { .name = "ql2300_fw.bin", .segs = { 0x800, 0 }, }, 2603 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
2573 { .name = "ql2322_fw.bin", .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 2604 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
2574 { .name = "ql2400_fw.bin", }, 2605 { .name = FW_FILE_ISP24XX, },
2575}; 2606};
2576 2607
2577struct fw_blob * 2608struct fw_blob *
@@ -2702,3 +2733,8 @@ MODULE_AUTHOR("QLogic Corporation");
2702MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 2733MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
2703MODULE_LICENSE("GPL"); 2734MODULE_LICENSE("GPL");
2704MODULE_VERSION(QLA2XXX_VERSION); 2735MODULE_VERSION(QLA2XXX_VERSION);
2736MODULE_FIRMWARE(FW_FILE_ISP21XX);
2737MODULE_FIRMWARE(FW_FILE_ISP22XX);
2738MODULE_FIRMWARE(FW_FILE_ISP2300);
2739MODULE_FIRMWARE(FW_FILE_ISP2322);
2740MODULE_FIRMWARE(FW_FILE_ISP24XX);
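The MODULE_FIRMWARE tags added above advertise, via modinfo, which blobs the module may request at run time with request_firmware(). A minimal sketch of that pairing; "example_fw.bin", the device pointer, and the copy step are placeholders, not taken from the driver:

#include <linux/module.h>
#include <linux/firmware.h>

#define EXAMPLE_FW_FILE "example_fw.bin"
MODULE_FIRMWARE(EXAMPLE_FW_FILE);	/* listed by modinfo/initramfs tools */

static int example_load_fw(struct device *dev)
{
	const struct firmware *blob;
	int err;

	err = request_firmware(&blob, EXAMPLE_FW_FILE, dev);
	if (err)
		return err;		/* file missing or userspace timeout */

	/* ... copy blob->data (blob->size bytes) into adapter RAM ... */

	release_firmware(blob);
	return 0;
}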
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 971259032ef..e57bf45a339 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.07-k1" 10#define QLA2XXX_VERSION "8.01.07-k2"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
new file mode 100644
index 00000000000..08a07f0b8d9
--- /dev/null
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX host adapter family support"
3 depends on PCI && SCSI
4 select SCSI_ISCSI_ATTRS
5 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) iSCSI host
7 adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
new file mode 100644
index 00000000000..86ea37baa0f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -0,0 +1,5 @@
1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
2 ql4_nvram.o ql4_dbg.o
3
4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
5
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
new file mode 100644
index 00000000000..752031fadfe
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -0,0 +1,197 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9#include <scsi/scsi_dbg.h>
10
11static void qla4xxx_print_srb_info(struct srb * srb)
12{
13 printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
14 printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
15 __func__, srb->cmd, (unsigned long) srb->dma_handle);
16 printk("%s: fw_ddb_index = %d, lun = %d\n",
17 __func__, srb->fw_ddb_index, srb->cmd->device->lun);
18 printk("%s: iocb_tov = %d\n",
19 __func__, srb->iocb_tov);
20 printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
21 __func__, srb->cc_stat, srb->r_start, srb->u_start);
22}
23
24void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
25{
26 printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
27 printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
28 cmd->device->channel, cmd->device->id, cmd->device->lun,
29 cmd->cmd_len);
30 scsi_print_command(cmd);
31 printk(" seg_cnt = %d\n", cmd->use_sg);
32 printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
33 cmd->request_buffer, cmd->request_bufflen);
34 if (cmd->use_sg) {
35 struct scatterlist *sg;
36 sg = (struct scatterlist *)cmd->request_buffer;
37 printk(" SG buffer: \n");
38 qla4xxx_dump_buffer((caddr_t) sg,
39 (cmd->use_sg * sizeof(*sg)));
40 }
41 printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
42 cmd->transfersize);
43 printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
44 printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
45 cmd->sc_data_direction);
46 printk(" Current time (jiffies) = 0x%lx, "
47 "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
48 qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
49}
50
51void __dump_registers(struct scsi_qla_host *ha)
52{
53 uint8_t i;
54 for (i = 0; i < MBOX_REG_COUNT; i++) {
55 printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
56 (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
57 readw(&ha->reg->mailbox[i]));
58 }
59 printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
60 (uint8_t) offsetof(struct isp_reg, flash_address),
61 readw(&ha->reg->flash_address));
62 printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
63 (uint8_t) offsetof(struct isp_reg, flash_data),
64 readw(&ha->reg->flash_data));
65 printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
66 (uint8_t) offsetof(struct isp_reg, ctrl_status),
67 readw(&ha->reg->ctrl_status));
68 if (is_qla4010(ha)) {
69 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
70 (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
71 readw(&ha->reg->u1.isp4010.nvram));
72 }
73
74 else if (is_qla4022(ha)) {
75 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
76 (uint8_t) offsetof(struct isp_reg,
77 u1.isp4022.intr_mask),
78 readw(&ha->reg->u1.isp4022.intr_mask));
79 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
80 (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
81 readw(&ha->reg->u1.isp4022.nvram));
82 printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
83 (uint8_t) offsetof(struct isp_reg,
84 u1.isp4022.semaphore),
85 readw(&ha->reg->u1.isp4022.semaphore));
86 }
87 printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
88 (uint8_t) offsetof(struct isp_reg, req_q_in),
89 readw(&ha->reg->req_q_in));
90 printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
91 (uint8_t) offsetof(struct isp_reg, rsp_q_out),
92 readw(&ha->reg->rsp_q_out));
93 if (is_qla4010(ha)) {
94 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
95 (uint8_t) offsetof(struct isp_reg,
96 u2.isp4010.ext_hw_conf),
97 readw(&ha->reg->u2.isp4010.ext_hw_conf));
98 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
99 (uint8_t) offsetof(struct isp_reg,
100 u2.isp4010.port_ctrl),
101 readw(&ha->reg->u2.isp4010.port_ctrl));
102 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
103 (uint8_t) offsetof(struct isp_reg,
104 u2.isp4010.port_status),
105 readw(&ha->reg->u2.isp4010.port_status));
106 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
107 (uint8_t) offsetof(struct isp_reg,
108 u2.isp4010.req_q_out),
109 readw(&ha->reg->u2.isp4010.req_q_out));
110 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
111 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
112 readw(&ha->reg->u2.isp4010.gp_out));
113 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
114 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
115 readw(&ha->reg->u2.isp4010.gp_in));
116 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
117 (uint8_t) offsetof(struct isp_reg,
118 u2.isp4010.port_err_status),
119 readw(&ha->reg->u2.isp4010.port_err_status));
120 }
121
122 else if (is_qla4022(ha)) {
123 printk(KERN_INFO "Page 0 Registers:\n");
124 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
125 (uint8_t) offsetof(struct isp_reg,
126 u2.isp4022.p0.ext_hw_conf),
127 readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
128 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
129 (uint8_t) offsetof(struct isp_reg,
130 u2.isp4022.p0.port_ctrl),
131 readw(&ha->reg->u2.isp4022.p0.port_ctrl));
132 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
133 (uint8_t) offsetof(struct isp_reg,
134 u2.isp4022.p0.port_status),
135 readw(&ha->reg->u2.isp4022.p0.port_status));
136 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
137 (uint8_t) offsetof(struct isp_reg,
138 u2.isp4022.p0.gp_out),
139 readw(&ha->reg->u2.isp4022.p0.gp_out));
140 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
141 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
142 readw(&ha->reg->u2.isp4022.p0.gp_in));
143 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
144 (uint8_t) offsetof(struct isp_reg,
145 u2.isp4022.p0.port_err_status),
146 readw(&ha->reg->u2.isp4022.p0.port_err_status));
147 printk(KERN_INFO "Page 1 Registers:\n");
148 writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
149 &ha->reg->ctrl_status);
150 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
151 (uint8_t) offsetof(struct isp_reg,
152 u2.isp4022.p1.req_q_out),
153 readw(&ha->reg->u2.isp4022.p1.req_q_out));
154 writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
155 &ha->reg->ctrl_status);
156 }
157}
158
159void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
160{
161 unsigned long flags = 0;
162 int i = 0;
163 spin_lock_irqsave(&ha->hardware_lock, flags);
164 for (i = 1; i < MBOX_REG_COUNT; i++)
165 printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
166 readw(&ha->reg->mailbox[i]));
167 spin_unlock_irqrestore(&ha->hardware_lock, flags);
168}
169
170void qla4xxx_dump_registers(struct scsi_qla_host *ha)
171{
172 unsigned long flags = 0;
173 spin_lock_irqsave(&ha->hardware_lock, flags);
174 __dump_registers(ha);
175 spin_unlock_irqrestore(&ha->hardware_lock, flags);
176}
177
178void qla4xxx_dump_buffer(void *b, uint32_t size)
179{
180 uint32_t cnt;
181 uint8_t *c = b;
182
183 printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
184 "Fh\n");
185 printk("------------------------------------------------------------"
186 "--\n");
187 for (cnt = 0; cnt < size; cnt++, c++) {
188 printk(KERN_DEBUG "%02x", *c);
189 if (!(cnt % 16))
190 printk(KERN_DEBUG "\n");
191
192 else
193 printk(KERN_DEBUG " ");
194 }
195 if (cnt % 16)
196 printk(KERN_DEBUG "\n");
197}
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
new file mode 100644
index 00000000000..56ddc227f84
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -0,0 +1,55 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8/*
9 * Driver debug definitions.
10 */
11/* #define QL_DEBUG */ /* DEBUG messages */
12/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
13/* #define QL_DEBUG_LEVEL_4 */
14/* #define QL_DEBUG_LEVEL_5 */
15/* #define QL_DEBUG_LEVEL_9 */
16
17#define QL_DEBUG_LEVEL_2 /* Always enable error messages */

18#if defined(QL_DEBUG)
19#define DEBUG(x) do {x;} while (0);
20#else
21#define DEBUG(x) do {} while (0);
22#endif
23
24#if defined(QL_DEBUG_LEVEL_2)
25#define DEBUG2(x) do {if(extended_error_logging == 2) x;} while (0);
26#define DEBUG2_3(x) do {x;} while (0);
27#else /* */
28#define DEBUG2(x) do {} while (0);
29#endif /* */
30
31#if defined(QL_DEBUG_LEVEL_3)
32#define DEBUG3(x) do {if(extended_error_logging == 3) x;} while (0);
33#else /* */
34#define DEBUG3(x) do {} while (0);
35#if !defined(QL_DEBUG_LEVEL_2)
36#define DEBUG2_3(x) do {} while (0);
37#endif /* */
38#endif /* */
39#if defined(QL_DEBUG_LEVEL_4)
40#define DEBUG4(x) do {x;} while (0);
41#else /* */
42#define DEBUG4(x) do {} while (0);
43#endif /* */
44
45#if defined(QL_DEBUG_LEVEL_5)
46#define DEBUG5(x) do {x;} while (0);
47#else /* */
48#define DEBUG5(x) do {} while (0);
49#endif /* */
50
51#if defined(QL_DEBUG_LEVEL_9)
52#define DEBUG9(x) do {x;} while (0);
53#else /* */
54#define DEBUG9(x) do {} while (0);
55#endif /* */
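
A minimal usage sketch (not part of this patch) of the level macros above, assuming only what ql4_dbg.h declares plus the extended_error_logging module parameter that DEBUG2()/DEBUG3() test:

/* Illustrative only; compiles inside the driver where ql4_def.h is
 * included and extended_error_logging is visible. */
static void example_trace(struct scsi_qla_host *ha)
{
	/* Printed only when QL_DEBUG_LEVEL_2 is built in and the
	 * extended_error_logging module parameter equals 2. */
	DEBUG2(printk("scsi%ld: %s: entry\n", ha->host_no, __func__));

	/* Compiled out entirely unless QL_DEBUG is defined above. */
	DEBUG(printk("scsi%ld: %s: verbose state dump\n",
		     ha->host_no, __func__));
}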
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
new file mode 100644
index 00000000000..a7f6c7b1c59
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -0,0 +1,586 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef __QL4_DEF_H
9#define __QL4_DEF_H
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/types.h>
14#include <linux/module.h>
15#include <linux/list.h>
16#include <linux/pci.h>
17#include <linux/dma-mapping.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/dmapool.h>
21#include <linux/mempool.h>
22#include <linux/spinlock.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/mutex.h>
27
28#include <net/tcp.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_transport.h>
34#include <scsi/scsi_transport_iscsi.h>
35
36
37#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
38#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
39#endif
40
41#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
42#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
43#endif /* */
44
45#define QLA_SUCCESS 0
46#define QLA_ERROR 1
47
48/*
49 * Data bit definitions
50 */
51#define BIT_0 0x1
52#define BIT_1 0x2
53#define BIT_2 0x4
54#define BIT_3 0x8
55#define BIT_4 0x10
56#define BIT_5 0x20
57#define BIT_6 0x40
58#define BIT_7 0x80
59#define BIT_8 0x100
60#define BIT_9 0x200
61#define BIT_10 0x400
62#define BIT_11 0x800
63#define BIT_12 0x1000
64#define BIT_13 0x2000
65#define BIT_14 0x4000
66#define BIT_15 0x8000
67#define BIT_16 0x10000
68#define BIT_17 0x20000
69#define BIT_18 0x40000
70#define BIT_19 0x80000
71#define BIT_20 0x100000
72#define BIT_21 0x200000
73#define BIT_22 0x400000
74#define BIT_23 0x800000
75#define BIT_24 0x1000000
76#define BIT_25 0x2000000
77#define BIT_26 0x4000000
78#define BIT_27 0x8000000
79#define BIT_28 0x10000000
80#define BIT_29 0x20000000
81#define BIT_30 0x40000000
82#define BIT_31 0x80000000
83
84/*
85 * Host adapter default definitions
86 ***********************************/
87#define MAX_HBAS 16
88#define MAX_BUSES 1
89#define MAX_TARGETS (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
90#define MAX_LUNS 0xffff
91#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
92#define MAX_DDB_ENTRIES (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
93#define MAX_PDU_ENTRIES 32
94#define INVALID_ENTRY 0xFFFF
95#define MAX_CMDS_TO_RISC 1024
96#define MAX_SRBS MAX_CMDS_TO_RISC
97#define MBOX_AEN_REG_COUNT 5
98#define MAX_INIT_RETRIES 5
99#define IOCB_HIWAT_CUSHION 16
100
101/*
102 * Buffer sizes
103 */
104#define REQUEST_QUEUE_DEPTH MAX_CMDS_TO_RISC
105#define RESPONSE_QUEUE_DEPTH 64
106#define QUEUE_SIZE 64
107#define DMA_BUFFER_SIZE 512
108
109/*
110 * Misc
111 */
112#define MAC_ADDR_LEN 6 /* in bytes */
113#define IP_ADDR_LEN 4 /* in bytes */
114#define DRIVER_NAME "qla4xxx"
115
116#define MAX_LINKED_CMDS_PER_LUN 3
117#define MAX_REQS_SERVICED_PER_INTR 16
118
119#define ISCSI_IPADDR_SIZE 4 /* IP address size */
120#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
121#define ISCSI_NAME_SIZE 255 /* ISCSI Name size -
122 * usually a string */
123
124#define LSDW(x) ((u32)((u64)(x)))
125#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
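/* Sketch of intended use (hypothetical, not from this file): split a
 * 64-bit DMA address into the Lo/Hi halves expected by the firmware
 * control block, e.g.
 *     init_fw_cb->ReqQAddrLo = LSDW(ha->request_dma);
 *     init_fw_cb->ReqQAddrHi = MSDW(ha->request_dma);
 * LSDW() yields bits 31:0, MSDW() yields bits 63:32 (zero when the
 * address fits in 32 bits). */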
126
127/*
128 * Retry & Timeout Values
129 */
130#define MBOX_TOV 60
131#define SOFT_RESET_TOV 30
132#define RESET_INTR_TOV 3
133#define SEMAPHORE_TOV 10
134#define ADAPTER_INIT_TOV 120
135#define ADAPTER_RESET_TOV 180
136#define EXTEND_CMD_TOV 60
137#define WAIT_CMD_TOV 30
138#define EH_WAIT_CMD_TOV 120
139#define FIRMWARE_UP_TOV 60
140#define RESET_FIRMWARE_TOV 30
141#define LOGOUT_TOV 10
142#define IOCB_TOV_MARGIN 10
143#define RELOGIN_TOV 18
144#define ISNS_DEREG_TOV 5
145
146#define MAX_RESET_HA_RETRIES 2
147
148/*
149 * SCSI Request Block structure (srb) that is placed
150 * on cmd->SCp location of every I/O [We have 22 bytes available]
151 */
152struct srb {
153 struct list_head list; /* (8) */
154 struct scsi_qla_host *ha; /* HA the SP is queued on */
155 struct ddb_entry *ddb;
156 uint16_t flags; /* (1) Status flags. */
157
158#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */
159#define SRB_GOT_SENSE BIT_4 /* sense data received. */
160 uint8_t state; /* (1) Status flags. */
161
162#define SRB_NO_QUEUE_STATE 0 /* Request is in between states */
163#define SRB_FREE_STATE 1
164#define SRB_ACTIVE_STATE 3
165#define SRB_ACTIVE_TIMEOUT_STATE 4
166#define SRB_SUSPENDED_STATE 7 /* Request in suspended state */
167
168 struct scsi_cmnd *cmd; /* (4) SCSI command block */
169 dma_addr_t dma_handle; /* (4) for unmap of single transfers */
170 atomic_t ref_count; /* reference count for this srb */
171 uint32_t fw_ddb_index;
172 uint8_t err_id; /* error id */
173#define SRB_ERR_PORT 1 /* Request failed because "port down" */
174#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */
175#define SRB_ERR_DEVICE 3 /* Request failed because "device error" */
176#define SRB_ERR_OTHER 4
177
178 uint16_t reserved;
179 uint16_t iocb_tov;
180 uint16_t iocb_cnt; /* Number of used iocbs */
181 uint16_t cc_stat;
182 u_long r_start; /* Time we receive a cmd from OS */
183 u_long u_start; /* Time when we handed the cmd to F/W */
184};
185
186 /*
187 * Device Database (DDB) structure
188 */
189struct ddb_entry {
190 struct list_head list; /* ddb list */
191 struct scsi_qla_host *ha;
192 struct iscsi_cls_session *sess;
193 struct iscsi_cls_conn *conn;
194
195 atomic_t state; /* DDB State */
196
197 unsigned long flags; /* DDB Flags */
198
199 unsigned long dev_scan_wait_to_start_relogin;
200 unsigned long dev_scan_wait_to_complete_relogin;
201
202 uint16_t os_target_id; /* Target ID */
203 uint16_t fw_ddb_index; /* DDB firmware index */
204 uint8_t reserved[2];
205 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
206
207 uint32_t CmdSn;
208 uint16_t target_session_id;
209 uint16_t connection_id;
210 uint16_t exe_throttle; /* Max number of cmds outstanding
211 * simultaneously */
212 uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
213 * complete */
214 uint16_t default_relogin_timeout; /* Max time to wait for
215 * relogin to complete */
216 uint16_t tcp_source_port_num;
217 uint32_t default_time2wait; /* Default Min time between
218 * relogins (+aens) */
219
220 atomic_t port_down_timer; /* Device connection timer */
221 atomic_t retry_relogin_timer; /* Min Time between relogins
222 * (4000 only) */
223 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
224 atomic_t relogin_retry_count; /* Num of times relogin has been
225 * retried */
226
227 uint16_t port;
228 uint32_t tpgt;
229 uint8_t ip_addr[ISCSI_IPADDR_SIZE];
230 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
231 uint8_t iscsi_alias[0x20];
232};
233
234/*
235 * DDB states.
236 */
237#define DDB_STATE_DEAD 0 /* We can no longer talk to
238 * this device */
239#define DDB_STATE_ONLINE 1 /* Device ready to accept
240 * commands */
241#define DDB_STATE_MISSING 2 /* Device logged off, trying
242 * to re-login */
243
244/*
245 * DDB flags.
246 */
247#define DF_RELOGIN 0 /* Relogin to device */
248#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL
249 * logged it out */
250#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
251#define DF_FO_MASKED 3
252
253/*
254 * Asynchronous Event Queue structure
255 */
256struct aen {
257 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
258};
259
260
261#include "ql4_fw.h"
262#include "ql4_nvram.h"
263
264/*
265 * Linux Host Adapter structure
266 */
267struct scsi_qla_host {
268 /* Linux adapter configuration data */
269 struct Scsi_Host *host; /* pointer to host data */
270 uint32_t tot_ddbs;
271 unsigned long flags;
272
273#define AF_ONLINE 0 /* 0x00000001 */
274#define AF_INIT_DONE 1 /* 0x00000002 */
275#define AF_MBOX_COMMAND 2 /* 0x00000004 */
276#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
277#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
278#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
279#define AF_LINK_UP 8 /* 0x00000100 */
280#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
281#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
282#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
283#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
284
285 unsigned long dpc_flags;
286
287#define DPC_RESET_HA 1 /* 0x00000002 */
288#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
289#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
290#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
291#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
292#define DPC_ISNS_RESTART 7 /* 0x00000080 */
293#define DPC_AEN 9 /* 0x00000200 */
294#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
295
296 uint16_t iocb_cnt;
297 uint16_t iocb_hiwat;
298
299 /* SRB cache. */
300#define SRB_MIN_REQ 128
301 mempool_t *srb_mempool;
302
303 /* pci information */
304 struct pci_dev *pdev;
305
306 struct isp_reg __iomem *reg; /* Base I/O address */
307 unsigned long pio_address;
308 unsigned long pio_length;
309#define MIN_IOBASE_LEN 0x100
310
311 uint16_t req_q_count;
312 uint8_t marker_needed;
313 uint8_t rsvd1;
314
315 unsigned long host_no;
316
317 /* NVRAM registers */
318 struct eeprom_data *nvram;
319 spinlock_t hardware_lock ____cacheline_aligned;
320 spinlock_t list_lock;
321 uint32_t eeprom_cmd_data;
322
323 /* Counters for general statistics */
324 uint64_t adapter_error_count;
325 uint64_t device_error_count;
326 uint64_t total_io_count;
327 uint64_t total_mbytes_xferred;
328 uint64_t link_failure_count;
329 uint64_t invalid_crc_count;
330 uint32_t spurious_int_count;
331 uint32_t aborted_io_count;
332 uint32_t io_timeout_count;
333 uint32_t mailbox_timeout_count;
334 uint32_t seconds_since_last_intr;
335 uint32_t seconds_since_last_heartbeat;
336 uint32_t mac_index;
337
338 /* Info Needed for Management App */
339 /* --- From GetFwVersion --- */
340 uint32_t firmware_version[2];
341 uint32_t patch_number;
342 uint32_t build_number;
343
344 /* --- From Init_FW --- */
345 /* init_cb_t *init_cb; */
346 uint16_t firmware_options;
347 uint16_t tcp_options;
348 uint8_t ip_address[IP_ADDR_LEN];
349 uint8_t subnet_mask[IP_ADDR_LEN];
350 uint8_t gateway[IP_ADDR_LEN];
351 uint8_t alias[32];
352 uint8_t name_string[256];
353 uint8_t heartbeat_interval;
354 uint8_t rsvd;
355
356 /* --- From FlashSysInfo --- */
357 uint8_t my_mac[MAC_ADDR_LEN];
358 uint8_t serial_number[16];
359
360 /* --- From GetFwState --- */
361 uint32_t firmware_state;
362 uint32_t board_id;
363 uint32_t addl_fw_state;
364
365 /* Linux kernel thread */
366 struct workqueue_struct *dpc_thread;
367 struct work_struct dpc_work;
368
369 /* Linux timer thread */
370 struct timer_list timer;
371 uint32_t timer_active;
372
373 /* Recovery Timers */
374 uint32_t port_down_retry_count;
375 uint32_t discovery_wait;
376 atomic_t check_relogin_timeouts;
377 uint32_t retry_reset_ha_cnt;
378 uint32_t isp_reset_timer; /* reset test timer */
379 uint32_t nic_reset_timer; /* simulated nic reset test timer */
380 int eh_start;
381 struct list_head free_srb_q;
382 uint16_t free_srb_q_count;
383 uint16_t num_srbs_allocated;
384
385 /* DMA Memory Block */
386 void *queues;
387 dma_addr_t queues_dma;
388 unsigned long queues_len;
389
390#define MEM_ALIGN_VALUE \
391 ((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
392 sizeof(struct queue_entry))
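/* Rough arithmetic, for orientation: struct queue_entry is 64 bytes
 * (60 data + 4 signature, see ql4_fw.h), REQUEST_QUEUE_DEPTH is 1024
 * and RESPONSE_QUEUE_DEPTH is 64, so MEM_ALIGN_VALUE evaluates to
 * 1024 * 64 = 65536 bytes (64 KiB), presumably the alignment used when
 * carving the request/response rings out of the "queues" DMA block
 * above. */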
393 /* request and response queue variables */
394 dma_addr_t request_dma;
395 struct queue_entry *request_ring;
396 struct queue_entry *request_ptr;
397 dma_addr_t response_dma;
398 struct queue_entry *response_ring;
399 struct queue_entry *response_ptr;
400 dma_addr_t shadow_regs_dma;
401 struct shadow_regs *shadow_regs;
402 uint16_t request_in; /* Current indexes. */
403 uint16_t request_out;
404 uint16_t response_in;
405 uint16_t response_out;
406
407 /* aen queue variables */
408 uint16_t aen_q_count; /* Number of available aen_q entries */
409 uint16_t aen_in; /* Current indexes */
410 uint16_t aen_out;
411 struct aen aen_q[MAX_AEN_ENTRIES];
412
413 /* This mutex prevents several threads from issuing mailbox
414 * commands concurrently.
415 */
416 struct mutex mbox_sem;
417 wait_queue_head_t mailbox_wait_queue;
418
419 /* temporary mailbox status registers */
420 volatile uint8_t mbox_status_count;
421 volatile uint32_t mbox_status[MBOX_REG_COUNT];
422
423 /* local device database list (contains internal ddb entries) */
424 struct list_head ddb_list;
425
426 /* Map ddb_list entry by FW ddb index */
427 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
428
429};
430
431static inline int is_qla4010(struct scsi_qla_host *ha)
432{
433 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
434}
435
436static inline int is_qla4022(struct scsi_qla_host *ha)
437{
438 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
439}
440
441static inline int adapter_up(struct scsi_qla_host *ha)
442{
443 return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
444 (test_bit(AF_LINK_UP, &ha->flags) != 0);
445}
446
447static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
448{
449 return (struct scsi_qla_host *)shost->hostdata;
450}
451
452static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
453{
454 return (is_qla4022(ha) ?
455 &ha->reg->u1.isp4022.semaphore :
456 &ha->reg->u1.isp4010.nvram);
457}
458
459static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
460{
461 return (is_qla4022(ha) ?
462 &ha->reg->u1.isp4022.nvram :
463 &ha->reg->u1.isp4010.nvram);
464}
465
466static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
467{
468 return (is_qla4022(ha) ?
469 &ha->reg->u2.isp4022.p0.ext_hw_conf :
470 &ha->reg->u2.isp4010.ext_hw_conf);
471}
472
473static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
474{
475 return (is_qla4022(ha) ?
476 &ha->reg->u2.isp4022.p0.port_status :
477 &ha->reg->u2.isp4010.port_status);
478}
479
480static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
481{
482 return (is_qla4022(ha) ?
483 &ha->reg->u2.isp4022.p0.port_ctrl :
484 &ha->reg->u2.isp4010.port_ctrl);
485}
486
487static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
488{
489 return (is_qla4022(ha) ?
490 &ha->reg->u2.isp4022.p0.port_err_status :
491 &ha->reg->u2.isp4010.port_err_status);
492}
493
494static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
495{
496 return (is_qla4022(ha) ?
497 &ha->reg->u2.isp4022.p0.gp_out :
498 &ha->reg->u2.isp4010.gp_out);
499}
500
501static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
502{
503 return (is_qla4022(ha) ?
504 offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
505 offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
506}
507
508int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
509void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask);
510int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
511
512static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
513{
514 if (is_qla4022(a))
515 return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
516 (QL4022_RESOURCE_BITS_BASE_CODE |
517 (a->mac_index)) << 13);
518 else
519 return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
520 QL4010_FLASH_SEM_BITS);
521}
522
523static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
524{
525 if (is_qla4022(a))
526 ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
527 else
528 ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
529}
530
531static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
532{
533 if (is_qla4022(a))
534 return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
535 (QL4022_RESOURCE_BITS_BASE_CODE |
536 (a->mac_index)) << 10);
537 else
538 return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
539 QL4010_NVRAM_SEM_BITS);
540}
541
542static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
543{
544 if (is_qla4022(a))
545 ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
546 else
547 ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
548}
549
550static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
551{
552 if (is_qla4022(a))
553 return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
554 (QL4022_RESOURCE_BITS_BASE_CODE |
555 (a->mac_index)) << 1);
556 else
557 return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
558 QL4010_DRVR_SEM_BITS);
559}
560
561static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
562{
563 if (is_qla4022(a))
564 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
565 else
566 ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
567}
568
569/*---------------------------------------------------------------------------*/
570
571/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
572#define PRESERVE_DDB_LIST 0
573#define REBUILD_DDB_LIST 1
574
575/* Defines for process_aen() */
576#define PROCESS_ALL_AENS 0
577#define FLUSH_DDB_CHANGED_AENS 1
578#define RELOGIN_DDB_CHANGED_AENS 2
579
580#include "ql4_version.h"
581#include "ql4_glbl.h"
582#include "ql4_dbg.h"
583#include "ql4_inline.h"
584
585
586#endif /*_QLA4XXX_H */
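
The small inline helpers above (to_qla_host(), adapter_up(), the is_qla40xx() tests) are the accessors the rest of the driver is expected to build on. A hedged sketch of a typical call pattern, with the function name and return convention purely illustrative:

/* Hypothetical helper, not from the patch: decide whether a command
 * may be handed to the firmware right now. */
static int example_can_queue(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);

	/* adapter_up() requires both AF_ONLINE and AF_LINK_UP. */
	if (!adapter_up(ha))
		return 0;

	/* Keep IOCB_HIWAT_CUSHION entries spare for internal use. */
	return ha->iocb_cnt + IOCB_HIWAT_CUSHION < ha->iocb_hiwat;
}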
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
new file mode 100644
index 00000000000..427489de64b
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -0,0 +1,843 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef _QLA4X_FW_H
9#define _QLA4X_FW_H
10
11
12#define MAX_PRST_DEV_DB_ENTRIES 64
13#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
14#define MAX_DEV_DB_ENTRIES 512
15
16/*************************************************************************
17 *
18 * ISP 4010 I/O Register Set Structure and Definitions
19 *
20 *************************************************************************/
21
22struct port_ctrl_stat_regs {
23 __le32 ext_hw_conf; /* 80 x50 R/W */
24 __le32 intChipConfiguration; /* 84 x54 */
25 __le32 port_ctrl; /* 88 x58 */
26 __le32 port_status; /* 92 x5c */
27 __le32 HostPrimMACHi; /* 96 x60 */
28 __le32 HostPrimMACLow; /* 100 x64 */
29 __le32 HostSecMACHi; /* 104 x68 */
30 __le32 HostSecMACLow; /* 108 x6c */
31 __le32 EPPrimMACHi; /* 112 x70 */
32 __le32 EPPrimMACLow; /* 116 x74 */
33 __le32 EPSecMACHi; /* 120 x78 */
34 __le32 EPSecMACLow; /* 124 x7c */
35 __le32 HostPrimIPHi; /* 128 x80 */
36 __le32 HostPrimIPMidHi; /* 132 x84 */
37 __le32 HostPrimIPMidLow; /* 136 x88 */
38 __le32 HostPrimIPLow; /* 140 x8c */
39 __le32 HostSecIPHi; /* 144 x90 */
40 __le32 HostSecIPMidHi; /* 148 x94 */
41 __le32 HostSecIPMidLow; /* 152 x98 */
42 __le32 HostSecIPLow; /* 156 x9c */
43 __le32 EPPrimIPHi; /* 160 xa0 */
44 __le32 EPPrimIPMidHi; /* 164 xa4 */
45 __le32 EPPrimIPMidLow; /* 168 xa8 */
46 __le32 EPPrimIPLow; /* 172 xac */
47 __le32 EPSecIPHi; /* 176 xb0 */
48 __le32 EPSecIPMidHi; /* 180 xb4 */
49 __le32 EPSecIPMidLow; /* 184 xb8 */
50 __le32 EPSecIPLow; /* 188 xbc */
51 __le32 IPReassemblyTimeout; /* 192 xc0 */
52 __le32 EthMaxFramePayload; /* 196 xc4 */
53 __le32 TCPMaxWindowSize; /* 200 xc8 */
54 __le32 TCPCurrentTimestampHi; /* 204 xcc */
55 __le32 TCPCurrentTimestampLow; /* 208 xd0 */
56 __le32 LocalRAMAddress; /* 212 xd4 */
57 __le32 LocalRAMData; /* 216 xd8 */
58 __le32 PCSReserved1; /* 220 xdc */
59 __le32 gp_out; /* 224 xe0 */
60 __le32 gp_in; /* 228 xe4 */
61 __le32 ProbeMuxAddr; /* 232 xe8 */
62 __le32 ProbeMuxData; /* 236 xec */
63 __le32 ERMQueueBaseAddr0; /* 240 xf0 */
64 __le32 ERMQueueBaseAddr1; /* 244 xf4 */
65 __le32 MACConfiguration; /* 248 xf8 */
66 __le32 port_err_status; /* 252 xfc COR */
67};
68
69struct host_mem_cfg_regs {
70 __le32 NetRequestQueueOut; /* 80 x50 */
71 __le32 NetRequestQueueOutAddrHi; /* 84 x54 */
72 __le32 NetRequestQueueOutAddrLow; /* 88 x58 */
73 __le32 NetRequestQueueBaseAddrHi; /* 92 x5c */
74 __le32 NetRequestQueueBaseAddrLow; /* 96 x60 */
75 __le32 NetRequestQueueLength; /* 100 x64 */
76 __le32 NetResponseQueueIn; /* 104 x68 */
77 __le32 NetResponseQueueInAddrHi; /* 108 x6c */
78 __le32 NetResponseQueueInAddrLow; /* 112 x70 */
79 __le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
80 __le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
81 __le32 NetResponseQueueLength; /* 124 x7c */
82 __le32 req_q_out; /* 128 x80 */
83 __le32 RequestQueueOutAddrHi; /* 132 x84 */
84 __le32 RequestQueueOutAddrLow; /* 136 x88 */
85 __le32 RequestQueueBaseAddrHi; /* 140 x8c */
86 __le32 RequestQueueBaseAddrLow; /* 144 x90 */
87 __le32 RequestQueueLength; /* 148 x94 */
88 __le32 ResponseQueueIn; /* 152 x98 */
89 __le32 ResponseQueueInAddrHi; /* 156 x9c */
90 __le32 ResponseQueueInAddrLow; /* 160 xa0 */
91 __le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
92 __le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
93 __le32 ResponseQueueLength; /* 172 xac */
94 __le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
95 __le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
96 __le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
97 __le32 NetRxLargeBufferQueueLength; /* 188 xbc */
98 __le32 NetRxLargeBufferLength; /* 192 xc0 */
99 __le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
100 __le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
101 __le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
102 __le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
103 __le32 NetRxSmallBufferLength; /* 212 xd4 */
104 __le32 HMCReserved0[10]; /* 216 xd8 */
105};
106
107struct local_ram_cfg_regs {
108 __le32 BufletSize; /* 80 x50 */
109 __le32 BufletMaxCount; /* 84 x54 */
110 __le32 BufletCurrCount; /* 88 x58 */
111 __le32 BufletPauseThresholdCount; /* 92 x5c */
112 __le32 BufletTCPWinThresholdHi; /* 96 x60 */
113 __le32 BufletTCPWinThresholdLow; /* 100 x64 */
114 __le32 IPHashTableBaseAddr; /* 104 x68 */
115 __le32 IPHashTableSize; /* 108 x6c */
116 __le32 TCPHashTableBaseAddr; /* 112 x70 */
117 __le32 TCPHashTableSize; /* 116 x74 */
118 __le32 NCBAreaBaseAddr; /* 120 x78 */
119 __le32 NCBMaxCount; /* 124 x7c */
120 __le32 NCBCurrCount; /* 128 x80 */
121 __le32 DRBAreaBaseAddr; /* 132 x84 */
122 __le32 DRBMaxCount; /* 136 x88 */
123 __le32 DRBCurrCount; /* 140 x8c */
124 __le32 LRCReserved[28]; /* 144 x90 */
125};
126
127struct prot_stat_regs {
128 __le32 MACTxFrameCount; /* 80 x50 R */
129 __le32 MACTxByteCount; /* 84 x54 R */
130 __le32 MACRxFrameCount; /* 88 x58 R */
131 __le32 MACRxByteCount; /* 92 x5c R */
132 __le32 MACCRCErrCount; /* 96 x60 R */
133 __le32 MACEncErrCount; /* 100 x64 R */
134 __le32 MACRxLengthErrCount; /* 104 x68 R */
135 __le32 IPTxPacketCount; /* 108 x6c R */
136 __le32 IPTxByteCount; /* 112 x70 R */
137 __le32 IPTxFragmentCount; /* 116 x74 R */
138 __le32 IPRxPacketCount; /* 120 x78 R */
139 __le32 IPRxByteCount; /* 124 x7c R */
140 __le32 IPRxFragmentCount; /* 128 x80 R */
141 __le32 IPDatagramReassemblyCount; /* 132 x84 R */
142 __le32 IPV6RxPacketCount; /* 136 x88 R */
143 __le32 IPErrPacketCount; /* 140 x8c R */
144 __le32 IPReassemblyErrCount; /* 144 x90 R */
145 __le32 TCPTxSegmentCount; /* 148 x94 R */
146 __le32 TCPTxByteCount; /* 152 x98 R */
147 __le32 TCPRxSegmentCount; /* 156 x9c R */
148 __le32 TCPRxByteCount; /* 160 xa0 R */
149 __le32 TCPTimerExpCount; /* 164 xa4 R */
150 __le32 TCPRxAckCount; /* 168 xa8 R */
151 __le32 TCPTxAckCount; /* 172 xac R */
152 __le32 TCPRxErrOOOCount; /* 176 xb0 R */
153 __le32 PSReserved0; /* 180 xb4 */
154 __le32 TCPRxWindowProbeUpdateCount; /* 184 xb8 R */
155 __le32 ECCErrCorrectionCount; /* 188 xbc R */
156 __le32 PSReserved1[16]; /* 192 xc0 */
157};
158
159
160/* remote register set (access via PCI memory read/write) */
161struct isp_reg {
162#define MBOX_REG_COUNT 8
163 __le32 mailbox[MBOX_REG_COUNT];
164
165 __le32 flash_address; /* 0x20 */
166 __le32 flash_data;
167 __le32 ctrl_status;
168
169 union {
170 struct {
171 __le32 nvram;
172 __le32 reserved1[2]; /* 0x30 */
173 } __attribute__ ((packed)) isp4010;
174 struct {
175 __le32 intr_mask;
176 __le32 nvram; /* 0x30 */
177 __le32 semaphore;
178 } __attribute__ ((packed)) isp4022;
179 } u1;
180
181 __le32 req_q_in; /* SCSI Request Queue Producer Index */
182 __le32 rsp_q_out; /* SCSI Completion Queue Consumer Index */
183
184 __le32 reserved2[4]; /* 0x40 */
185
186 union {
187 struct {
188 __le32 ext_hw_conf; /* 0x50 */
189 __le32 flow_ctrl;
190 __le32 port_ctrl;
191 __le32 port_status;
192
193 __le32 reserved3[8]; /* 0x60 */
194
195 __le32 req_q_out; /* 0x80 */
196
197 __le32 reserved4[23]; /* 0x84 */
198
199 __le32 gp_out; /* 0xe0 */
200 __le32 gp_in;
201
202 __le32 reserved5[5];
203
204 __le32 port_err_status; /* 0xfc */
205 } __attribute__ ((packed)) isp4010;
206 struct {
207 union {
208 struct port_ctrl_stat_regs p0;
209 struct host_mem_cfg_regs p1;
210 struct local_ram_cfg_regs p2;
211 struct prot_stat_regs p3;
212 __le32 r_union[44];
213 };
214
215 } __attribute__ ((packed)) isp4022;
216 } u2;
217}; /* 256 x100 */
218
219
220/* Semaphore Defines for 4010 */
221#define QL4010_DRVR_SEM_BITS 0x00000030
222#define QL4010_GPIO_SEM_BITS 0x000000c0
223#define QL4010_SDRAM_SEM_BITS 0x00000300
224#define QL4010_PHY_SEM_BITS 0x00000c00
225#define QL4010_NVRAM_SEM_BITS 0x00003000
226#define QL4010_FLASH_SEM_BITS 0x0000c000
227
228#define QL4010_DRVR_SEM_MASK 0x00300000
229#define QL4010_GPIO_SEM_MASK 0x00c00000
230#define QL4010_SDRAM_SEM_MASK 0x03000000
231#define QL4010_PHY_SEM_MASK 0x0c000000
232#define QL4010_NVRAM_SEM_MASK 0x30000000
233#define QL4010_FLASH_SEM_MASK 0xc0000000
234
235/* Semaphore Defines for 4022 */
236#define QL4022_RESOURCE_MASK_BASE_CODE 0x7
237#define QL4022_RESOURCE_BITS_BASE_CODE 0x4
238
239
240#define QL4022_DRVR_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (1+16))
241#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16))
242#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16))
243#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
244#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
245
246
247
248/* Page # defines for 4022 */
249#define PORT_CTRL_STAT_PAGE 0 /* 4022 */
250#define HOST_MEM_CFG_PAGE 1 /* 4022 */
251#define LOCAL_RAM_CFG_PAGE 2 /* 4022 */
252#define PROT_STAT_PAGE 3 /* 4022 */
253
254/* Register Mask - sets corresponding mask bits in the upper word */
255static inline uint32_t set_rmask(uint32_t val)
256{
257 return (val & 0xffff) | (val << 16);
258}
259
260
261static inline uint32_t clr_rmask(uint32_t val)
262{
263 return 0 | (val << 16);
264}
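/* Worked example (illustrative, not in the original source): for
 * CSR_SOFT_RESET (0x00008000, defined below),
 *     set_rmask(CSR_SOFT_RESET) == 0x80008000
 *     clr_rmask(CSR_SOFT_RESET) == 0x80000000
 * i.e. set_rmask() mirrors the value into the upper (mask) halfword,
 * while clr_rmask() leaves the data halfword zero, matching the
 * "sets corresponding mask bits in the upper word" note above. */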
265
266/* ctrl_status definitions */
267#define CSR_SCSI_PAGE_SELECT 0x00000003
268#define CSR_SCSI_INTR_ENABLE 0x00000004 /* 4010 */
269#define CSR_SCSI_RESET_INTR 0x00000008
270#define CSR_SCSI_COMPLETION_INTR 0x00000010
271#define CSR_SCSI_PROCESSOR_INTR 0x00000020
272#define CSR_INTR_RISC 0x00000040
273#define CSR_BOOT_ENABLE 0x00000080
274#define CSR_NET_PAGE_SELECT 0x00000300 /* 4010 */
275#define CSR_FUNC_NUM 0x00000700 /* 4022 */
276#define CSR_NET_RESET_INTR 0x00000800 /* 4010 */
277#define CSR_FORCE_SOFT_RESET 0x00002000 /* 4022 */
278#define CSR_FATAL_ERROR 0x00004000
279#define CSR_SOFT_RESET 0x00008000
280#define ISP_CONTROL_FN_MASK CSR_FUNC_NUM
281#define ISP_CONTROL_FN0_SCSI 0x0500
282#define ISP_CONTROL_FN1_SCSI 0x0700
283
284#define INTR_PENDING (CSR_SCSI_COMPLETION_INTR |\
285 CSR_SCSI_PROCESSOR_INTR |\
286 CSR_SCSI_RESET_INTR)
287
288/* ISP InterruptMask definitions */
289#define IMR_SCSI_INTR_ENABLE 0x00000004 /* 4022 */
290
291/* ISP 4022 nvram definitions */
292#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */
293
294/* ISP port_status definitions */
295
296/* ISP Semaphore definitions */
297
298/* ISP General Purpose Output definitions */
299#define GPOR_TOPCAT_RESET 0x00000004
300
301/* shadow registers (DMA'd from HA to system memory. read only) */
302struct shadow_regs {
303 /* SCSI Request Queue Consumer Index */
304 __le32 req_q_out; /* 0 x0 R */
305
306 /* SCSI Completion Queue Producer Index */
307 __le32 rsp_q_in; /* 4 x4 R */
308}; /* 8 x8 */
309
310
311/* External hardware configuration register */
312union external_hw_config_reg {
313 struct {
314 /* FIXME: Do we even need this? All values are
315 * referred to by 16-bit quantities. Platform and
316 * endianness issues. */
317 __le32 bReserved0:1;
318 __le32 bSDRAMProtectionMethod:2;
319 __le32 bSDRAMBanks:1;
320 __le32 bSDRAMChipWidth:1;
321 __le32 bSDRAMChipSize:2;
322 __le32 bParityDisable:1;
323 __le32 bExternalMemoryType:1;
324 __le32 bFlashBIOSWriteEnable:1;
325 __le32 bFlashUpperBankSelect:1;
326 __le32 bWriteBurst:2;
327 __le32 bReserved1:3;
328 __le32 bMask:16;
329 };
330 uint32_t Asuint32_t;
331};
332
333/*************************************************************************
334 *
335 * Mailbox Commands Structures and Definitions
336 *
337 *************************************************************************/
338
339/* Mailbox command definitions */
340#define MBOX_CMD_ABOUT_FW 0x0009
341#define MBOX_CMD_LUN_RESET 0x0016
342#define MBOX_CMD_GET_FW_STATUS 0x001F
343#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
344#define ISNS_DISABLE 0
345#define ISNS_ENABLE 1
346#define MBOX_CMD_READ_FLASH 0x0026
347#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
348#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
349#define LOGOUT_OPTION_CLOSE_SESSION 0x01
350#define LOGOUT_OPTION_RELOGIN 0x02
351#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
352#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
353#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
354#define MBOX_CMD_REQUEST_DATABASE_ENTRY 0x0062
355#define MBOX_CMD_SET_DATABASE_ENTRY 0x0063
356#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064
357#define DDB_DS_UNASSIGNED 0x00
358#define DDB_DS_NO_CONNECTION_ACTIVE 0x01
359#define DDB_DS_SESSION_ACTIVE 0x04
360#define DDB_DS_SESSION_FAILED 0x06
361#define DDB_DS_LOGIN_IN_PROCESS 0x07
362#define MBOX_CMD_GET_FW_STATE 0x0069
363
364/* Mailbox 1 */
365#define FW_STATE_READY 0x0000
366#define FW_STATE_CONFIG_WAIT 0x0001
367#define FW_STATE_ERROR 0x0004
368#define FW_STATE_DHCP_IN_PROGRESS 0x0008
369
370/* Mailbox 3 */
371#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
372#define FW_ADDSTATE_DHCP_ENABLED 0x0002
373#define FW_ADDSTATE_LINK_UP 0x0010
374#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
375#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
376#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074
377#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
378#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
379
380/* Mailbox status definitions */
381#define MBOX_COMPLETION_STATUS 4
382#define MBOX_STS_BUSY 0x0007
383#define MBOX_STS_INTERMEDIATE_COMPLETION 0x1000
384#define MBOX_STS_COMMAND_COMPLETE 0x4000
385#define MBOX_STS_COMMAND_ERROR 0x4005
386
387#define MBOX_ASYNC_EVENT_STATUS 8
388#define MBOX_ASTS_SYSTEM_ERROR 0x8002
389#define MBOX_ASTS_REQUEST_TRANSFER_ERROR 0x8003
390#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR 0x8004
391#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM 0x8005
392#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED 0x8006
393#define MBOX_ASTS_LINK_UP 0x8010
394#define MBOX_ASTS_LINK_DOWN 0x8011
395#define MBOX_ASTS_DATABASE_CHANGED 0x8014
396#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED 0x8015
397#define MBOX_ASTS_SELF_TEST_FAILED 0x8016
398#define MBOX_ASTS_LOGIN_FAILED 0x8017
399#define MBOX_ASTS_DNS 0x8018
400#define MBOX_ASTS_HEARTBEAT 0x8019
401#define MBOX_ASTS_NVRAM_INVALID 0x801A
402#define MBOX_ASTS_MAC_ADDRESS_CHANGED 0x801B
403#define MBOX_ASTS_IP_ADDRESS_CHANGED 0x801C
404#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
405#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
406#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
407#define ISNS_EVENT_DATA_RECEIVED 0x0000
408#define ISNS_EVENT_CONNECTION_OPENED 0x0001
409#define ISNS_EVENT_CONNECTION_FAILED 0x0002
410#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
411#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
412
413/*************************************************************************/
414
415/* Host Adapter Initialization Control Block (from host) */
416struct init_fw_ctrl_blk {
417 uint8_t Version; /* 00 */
418 uint8_t Control; /* 01 */
419
420 uint16_t FwOptions; /* 02-03 */
421#define FWOPT_HEARTBEAT_ENABLE 0x1000
422#define FWOPT_SESSION_MODE 0x0040
423#define FWOPT_INITIATOR_MODE 0x0020
424#define FWOPT_TARGET_MODE 0x0010
425
426 uint16_t ExecThrottle; /* 04-05 */
427 uint8_t RetryCount; /* 06 */
428 uint8_t RetryDelay; /* 07 */
429 uint16_t MaxEthFrPayloadSize; /* 08-09 */
430 uint16_t AddFwOptions; /* 0A-0B */
431
432 uint8_t HeartbeatInterval; /* 0C */
433 uint8_t InstanceNumber; /* 0D */
434 uint16_t RES2; /* 0E-0F */
435 uint16_t ReqQConsumerIndex; /* 10-11 */
436 uint16_t ComplQProducerIndex; /* 12-13 */
437 uint16_t ReqQLen; /* 14-15 */
438 uint16_t ComplQLen; /* 16-17 */
439 uint32_t ReqQAddrLo; /* 18-1B */
440 uint32_t ReqQAddrHi; /* 1C-1F */
441 uint32_t ComplQAddrLo; /* 20-23 */
442 uint32_t ComplQAddrHi; /* 24-27 */
443 uint32_t ShadowRegBufAddrLo; /* 28-2B */
444 uint32_t ShadowRegBufAddrHi; /* 2C-2F */
445
446 uint16_t iSCSIOptions; /* 30-31 */
447
448 uint16_t TCPOptions; /* 32-33 */
449
450 uint16_t IPOptions; /* 34-35 */
451
452 uint16_t MaxPDUSize; /* 36-37 */
453 uint16_t RcvMarkerInt; /* 38-39 */
454 uint16_t SndMarkerInt; /* 3A-3B */
455 uint16_t InitMarkerlessInt; /* 3C-3D */
456 uint16_t FirstBurstSize; /* 3E-3F */
457 uint16_t DefaultTime2Wait; /* 40-41 */
458 uint16_t DefaultTime2Retain; /* 42-43 */
459 uint16_t MaxOutStndngR2T; /* 44-45 */
460 uint16_t KeepAliveTimeout; /* 46-47 */
461 uint16_t PortNumber; /* 48-49 */
462 uint16_t MaxBurstSize; /* 4A-4B */
463 uint32_t RES4; /* 4C-4F */
464 uint8_t IPAddr[4]; /* 50-53 */
465 uint8_t RES5[12]; /* 54-5F */
466 uint8_t SubnetMask[4]; /* 60-63 */
467 uint8_t RES6[12]; /* 64-6F */
468 uint8_t GatewayIPAddr[4]; /* 70-73 */
469 uint8_t RES7[12]; /* 74-7F */
470 uint8_t PriDNSIPAddr[4]; /* 80-83 */
471 uint8_t SecDNSIPAddr[4]; /* 84-87 */
472 uint8_t RES8[8]; /* 88-8F */
473 uint8_t Alias[32]; /* 90-AF */
474 uint8_t TargAddr[8]; /* B0-B7 */ /* FIXME: Remove?? */
475 uint8_t CHAPNameSecretsTable[8]; /* B8-BF */
476 uint8_t EthernetMACAddr[6]; /* C0-C5 */
477 uint16_t TargetPortalGroup; /* C6-C7 */
478 uint8_t SendScale; /* C8 */
479 uint8_t RecvScale; /* C9 */
480 uint8_t TypeOfService; /* CA */
481 uint8_t Time2Live; /* CB */
482 uint16_t VLANPriority; /* CC-CD */
483 uint16_t Reserved8; /* CE-CF */
484 uint8_t SecIPAddr[4]; /* D0-D3 */
485 uint8_t Reserved9[12]; /* D4-DF */
486 uint8_t iSNSIPAddr[4]; /* E0-E3 */
487 uint16_t iSNSServerPortNumber; /* E4-E5 */
488 uint8_t Reserved10[10]; /* E6-EF */
489 uint8_t SLPDAIPAddr[4]; /* F0-F3 */
490 uint8_t Reserved11[12]; /* F4-FF */
491 uint8_t iSCSINameString[256]; /* 100-1FF */
492};
493
494/*************************************************************************/
495
496struct dev_db_entry {
497 uint8_t options; /* 00 */
498#define DDB_OPT_DISC_SESSION 0x10
499#define DDB_OPT_TARGET 0x02 /* device is a target */
500
501 uint8_t control; /* 01 */
502
503 uint16_t exeThrottle; /* 02-03 */
504 uint16_t exeCount; /* 04-05 */
505 uint8_t retryCount; /* 06 */
506 uint8_t retryDelay; /* 07 */
507 uint16_t iSCSIOptions; /* 08-09 */
508
509 uint16_t TCPOptions; /* 0A-0B */
510
511 uint16_t IPOptions; /* 0C-0D */
512
513 uint16_t maxPDUSize; /* 0E-0F */
514 uint16_t rcvMarkerInt; /* 10-11 */
515 uint16_t sndMarkerInt; /* 12-13 */
516 uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
517 uint16_t firstBurstSize; /* 16-17 */
518 uint16_t minTime2Wait; /* 18-19 : RA :default_time2wait */
519 uint16_t maxTime2Retain; /* 1A-1B */
520 uint16_t maxOutstndngR2T; /* 1C-1D */
521 uint16_t keepAliveTimeout; /* 1E-1F */
522 uint8_t ISID[6]; /* 20-25 big-endian, must be converted
523 * to little-endian */
524 uint16_t TSID; /* 26-27 */
525 uint16_t portNumber; /* 28-29 */
526 uint16_t maxBurstSize; /* 2A-2B */
527 uint16_t taskMngmntTimeout; /* 2C-2D */
528 uint16_t reserved1; /* 2E-2F */
529 uint8_t ipAddr[0x10]; /* 30-3F */
530 uint8_t iSCSIAlias[0x20]; /* 40-5F */
531 uint8_t targetAddr[0x20]; /* 60-7F */
532 uint8_t userID[0x20]; /* 80-9F */
533 uint8_t password[0x20]; /* A0-BF */
534 uint8_t iscsiName[0x100]; /* C0-1BF : xxzzy Make this a
535 * pointer to a string so we
536 * don't have to reserve soooo
537 * much RAM */
538 uint16_t ddbLink; /* 1C0-1C1 */
539 uint16_t CHAPTableIndex; /* 1C2-1C3 */
540 uint16_t TargetPortalGroup; /* 1C4-1C5 */
541 uint16_t reserved2[2]; /* 1C6-1C7 */
542 uint32_t statSN; /* 1C8-1CB */
543 uint32_t expStatSN; /* 1CC-1CF */
544 uint16_t reserved3[0x2C]; /* 1D0-1FB */
545 uint16_t ddbValidCookie; /* 1FC-1FD */
546 uint16_t ddbValidSize; /* 1FE-1FF */
547};
548
549/*************************************************************************/
550
551/* Flash definitions */
552
553#define FLASH_OFFSET_SYS_INFO 0x02000000
554#define FLASH_DEFAULTBLOCKSIZE 0x20000
555#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
556 * for EOF
557 * signature */
558
559struct sys_info_phys_addr {
560 uint8_t address[6]; /* 00-05 */
561 uint8_t filler[2]; /* 06-07 */
562};
563
564struct flash_sys_info {
565 uint32_t cookie; /* 00-03 */
566 uint32_t physAddrCount; /* 04-07 */
567 struct sys_info_phys_addr physAddr[4]; /* 08-27 */
568 uint8_t vendorId[128]; /* 28-A7 */
569 uint8_t productId[128]; /* A8-127 */
570 uint32_t serialNumber; /* 128-12B */
571
572 /* PCI Configuration values */
573 uint32_t pciDeviceVendor; /* 12C-12F */
574 uint32_t pciDeviceId; /* 130-133 */
575 uint32_t pciSubsysVendor; /* 134-137 */
576 uint32_t pciSubsysId; /* 138-13B */
577
578 /* This validates version 1. */
579 uint32_t crumbs; /* 13C-13F */
580
581 uint32_t enterpriseNumber; /* 140-143 */
582
583 uint32_t mtu; /* 144-147 */
584 uint32_t reserved0; /* 148-14b */
585 uint32_t crumbs2; /* 14c-14f */
586 uint8_t acSerialNumber[16]; /* 150-15f */
587 uint32_t crumbs3; /* 160-16f */
588
589 /* Leave this last in the struct so it is declared invalid if
590 * any new items are added.
591 */
592 uint32_t reserved1[39]; /* 170-1ff */
593}; /* 200 */
594
595struct crash_record {
596 uint16_t fw_major_version; /* 00 - 01 */
597 uint16_t fw_minor_version; /* 02 - 03 */
598 uint16_t fw_patch_version; /* 04 - 05 */
599 uint16_t fw_build_version; /* 06 - 07 */
600
601 uint8_t build_date[16]; /* 08 - 17 */
602 uint8_t build_time[16]; /* 18 - 27 */
603 uint8_t build_user[16]; /* 28 - 37 */
604 uint8_t card_serial_num[16]; /* 38 - 47 */
605
606 uint32_t time_of_crash_in_secs; /* 48 - 4B */
607 uint32_t time_of_crash_in_ms; /* 4C - 4F */
608
609 uint16_t out_RISC_sd_num_frames; /* 50 - 51 */
610 uint16_t OAP_sd_num_words; /* 52 - 53 */
611 uint16_t IAP_sd_num_frames; /* 54 - 55 */
612 uint16_t in_RISC_sd_num_words; /* 56 - 57 */
613
614 uint8_t reserved1[28]; /* 58 - 7F */
615
616 uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
617 uint8_t in_RISC_reg_dump[256]; /*180 -27F */
618 uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */
619};
620
621struct conn_event_log_entry {
622#define MAX_CONN_EVENT_LOG_ENTRIES 100
623 uint32_t timestamp_sec; /* 00 - 03 seconds since boot */
624 uint32_t timestamp_ms; /* 04 - 07 milliseconds since boot */
625 uint16_t device_index; /* 08 - 09 */
626 uint16_t fw_conn_state; /* 0A - 0B */
627 uint8_t event_type; /* 0C - 0C */
628 uint8_t error_code; /* 0D - 0D */
629 uint16_t error_code_detail; /* 0E - 0F */
630 uint8_t num_consecutive_events; /* 10 - 10 */
631 uint8_t rsvd[3]; /* 11 - 13 */
632};
633
634/*************************************************************************
635 *
636 * IOCB Commands Structures and Definitions
637 *
638 *************************************************************************/
639#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CDB */
640#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */
641
642/* IOCB header structure */
643struct qla4_header {
644 uint8_t entryType;
645#define ET_STATUS 0x03
646#define ET_MARKER 0x04
647#define ET_CONT_T1 0x0A
648#define ET_STATUS_CONTINUATION 0x10
649#define ET_CMND_T3 0x19
650#define ET_PASSTHRU0 0x3A
651#define ET_PASSTHRU_STATUS 0x3C
652
653 uint8_t entryStatus;
654 uint8_t systemDefined;
655 uint8_t entryCount;
656
657 /* SystemDefined definition */
658};
659
660/* Generic queue entry structure*/
661struct queue_entry {
662 uint8_t data[60];
663 uint32_t signature;
664
665};
666
667/* 64 bit addressing segment counts*/
668
669#define COMMAND_SEG_A64 1
670#define CONTINUE_SEG_A64 5
671
672/* 64 bit addressing segment definition*/
673
674struct data_seg_a64 {
675 struct {
676 uint32_t addrLow;
677 uint32_t addrHigh;
678
679 } base;
680
681 uint32_t count;
682
683};
684
685/* Command Type 3 entry structure*/
686
687struct command_t3_entry {
688 struct qla4_header hdr; /* 00-03 */
689
690 uint32_t handle; /* 04-07 */
691 uint16_t target; /* 08-09 */
692 uint16_t connection_id; /* 0A-0B */
693
694 uint8_t control_flags; /* 0C */
695
696 /* data direction (bits 5-6) */
697#define CF_WRITE 0x20
698#define CF_READ 0x40
699#define CF_NO_DATA 0x00
700
701 /* task attributes (bits 2-0) */
702#define CF_HEAD_TAG 0x03
703#define CF_ORDERED_TAG 0x02
704#define CF_SIMPLE_TAG 0x01
705
706 /* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS
707 * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS
708 * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET
709 * PROPERLY.
710 */
711 uint8_t state_flags; /* 0D */
712 uint8_t cmdRefNum; /* 0E */
713 uint8_t reserved1; /* 0F */
714 uint8_t cdb[IOCB_MAX_CDB_LEN]; /* 10-1F */
715 struct scsi_lun lun; /* FCP LUN (BE). */
716 uint32_t cmdSeqNum; /* 28-2B */
717 uint16_t timeout; /* 2C-2D */
718 uint16_t dataSegCnt; /* 2E-2F */
719 uint32_t ttlByteCnt; /* 30-33 */
720 struct data_seg_a64 dataseg[COMMAND_SEG_A64]; /* 34-3F */
721
722};
723
724
725/* Continuation Type 1 entry structure*/
726struct continuation_t1_entry {
727 struct qla4_header hdr;
728
729 struct data_seg_a64 dataseg[CONTINUE_SEG_A64];
730
731};
732
733/* Parameterize for 64 or 32 bits */
734#define COMMAND_SEG COMMAND_SEG_A64
735#define CONTINUE_SEG CONTINUE_SEG_A64
736
737#define ET_COMMAND ET_CMND_T3
738#define ET_CONTINUE ET_CONT_T1
739
740/* Marker entry structure*/
741struct marker_entry {
742 struct qla4_header hdr; /* 00-03 */
743
744 uint32_t system_defined; /* 04-07 */
745 uint16_t target; /* 08-09 */
746 uint16_t modifier; /* 0A-0B */
747#define MM_LUN_RESET 0
748
749 uint16_t flags; /* 0C-0D */
750 uint16_t reserved1; /* 0E-0F */
751 struct scsi_lun lun; /* FCP LUN (BE). */
752 uint64_t reserved2; /* 18-1F */
753 uint64_t reserved3; /* 20-27 */
754 uint64_t reserved4; /* 28-2F */
755 uint64_t reserved5; /* 30-37 */
756 uint64_t reserved6; /* 38-3F */
757};
758
759/* Status entry structure*/
760struct status_entry {
761 struct qla4_header hdr; /* 00-03 */
762
763 uint32_t handle; /* 04-07 */
764
765 uint8_t scsiStatus; /* 08 */
766#define SCSI_CHECK_CONDITION 0x02
767
768 uint8_t iscsiFlags; /* 09 */
769#define ISCSI_FLAG_RESIDUAL_UNDER 0x02
770#define ISCSI_FLAG_RESIDUAL_OVER 0x04
771
772 uint8_t iscsiResponse; /* 0A */
773
774 uint8_t completionStatus; /* 0B */
775#define SCS_COMPLETE 0x00
776#define SCS_INCOMPLETE 0x01
777#define SCS_RESET_OCCURRED 0x04
778#define SCS_ABORTED 0x05
779#define SCS_TIMEOUT 0x06
780#define SCS_DATA_OVERRUN 0x07
781#define SCS_DATA_UNDERRUN 0x15
782#define SCS_QUEUE_FULL 0x1C
783#define SCS_DEVICE_UNAVAILABLE 0x28
784#define SCS_DEVICE_LOGGED_OUT 0x29
785
786 uint8_t reserved1; /* 0C */
787
788 /* state_flags MUST be at the same location as state_flags in
789 * the Command_T3/4_Entry */
790 uint8_t state_flags; /* 0D */
791
792 uint16_t senseDataByteCnt; /* 0E-0F */
793 uint32_t residualByteCnt; /* 10-13 */
794 uint32_t bidiResidualByteCnt; /* 14-17 */
795 uint32_t expSeqNum; /* 18-1B */
796 uint32_t maxCmdSeqNum; /* 1C-1F */
797 uint8_t senseData[IOCB_MAX_SENSEDATA_LEN]; /* 20-3F */
798
799};
800
801struct passthru0 {
802 struct qla4_header hdr; /* 00-03 */
803 uint32_t handle; /* 04-07 */
804 uint16_t target; /* 08-09 */
805 uint16_t connectionID; /* 0A-0B */
806#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000)
807
808 uint16_t controlFlags; /* 0C-0D */
809#define PT_FLAG_ETHERNET_FRAME 0x8000
810#define PT_FLAG_ISNS_PDU 0x8000
811#define PT_FLAG_SEND_BUFFER 0x0200
812#define PT_FLAG_WAIT_4_RESPONSE 0x0100
813
814 uint16_t timeout; /* 0E-0F */
815#define PT_DEFAULT_TIMEOUT 30 /* seconds */
816
817 struct data_seg_a64 outDataSeg64; /* 10-1B */
818 uint32_t res1; /* 1C-1F */
819 struct data_seg_a64 inDataSeg64; /* 20-2B */
820 uint8_t res2[20]; /* 2C-3F */
821};
822
823struct passthru_status {
824 struct qla4_header hdr; /* 00-03 */
825 uint32_t handle; /* 04-07 */
826 uint16_t target; /* 08-09 */
827 uint16_t connectionID; /* 0A-0B */
828
829 uint8_t completionStatus; /* 0C */
830#define PASSTHRU_STATUS_COMPLETE 0x01
831
832 uint8_t residualFlags; /* 0D */
833
834 uint16_t timeout; /* 0E-0F */
835 uint16_t portNumber; /* 10-11 */
836 uint8_t res1[10]; /* 12-1B */
837 uint32_t outResidual; /* 1C-1F */
838 uint8_t res2[12]; /* 20-2B */
839 uint32_t inResidual; /* 2C-2F */
840 uint8_t res4[16]; /* 30-3F */
841};
842
843#endif /* _QLA4X_FW_H */
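
As the COMMAND_SEG_A64 / CONTINUE_SEG_A64 definitions above suggest, a command IOCB carries one 64-bit data segment and each continuation entry carries five more, so longer scatter/gather lists span several queue entries. A hedged sketch of the resulting entry count (helper name hypothetical; the driver itself may compute this differently):

/* How many queue entries an I/O with 'sg_segments' data segments would
 * occupy: one command_t3_entry plus enough continuation_t1_entry slots
 * for the remaining segments. E.g. 12 segments -> 1 + 3 = 4 entries. */
static inline uint16_t example_iocb_entries(uint16_t sg_segments)
{
	uint16_t entries = 1;		/* the command entry itself */

	if (sg_segments > COMMAND_SEG_A64)
		entries += (sg_segments - COMMAND_SEG_A64 +
			    CONTINUE_SEG_A64 - 1) / CONTINUE_SEG_A64;
	return entries;
}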
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
new file mode 100644
index 00000000000..418fb7a13a6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -0,0 +1,78 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef __QLA4x_GBL_H
9#define __QLA4x_GBL_H
10
11int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
12int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
13int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
14 uint8_t renew_ddb_list);
15int qla4xxx_soft_reset(struct scsi_qla_host *ha);
16irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs);
17
18void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
19void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
20
21int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
22int qla4xxx_relogin_device(struct scsi_qla_host * ha,
23 struct ddb_entry * ddb_entry);
24int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
25 int lun);
26int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
27 uint32_t offset, uint32_t len);
28int qla4xxx_get_firmware_status(struct scsi_qla_host * ha);
29int qla4xxx_get_firmware_state(struct scsi_qla_host * ha);
30int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha);
31
32/* FIXME: Goodness! this really wants a small struct to hold the
33 * parameters. On x86 the args will get passed on the stack! */
34int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
35 uint16_t fw_ddb_index,
36 struct dev_db_entry *fw_ddb_entry,
37 dma_addr_t fw_ddb_entry_dma,
38 uint32_t *num_valid_ddb_entries,
39 uint32_t *next_ddb_index,
40 uint32_t *fw_ddb_device_state,
41 uint32_t *conn_err_detail,
42 uint16_t *tcp_source_port_num,
43 uint16_t *connection_id);
44
45struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
46 uint32_t fw_ddb_index);
47int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
48 dma_addr_t fw_ddb_entry_dma);
49
50void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
51 struct ddb_entry *ddb_entry);
52u16 rd_nvram_word(struct scsi_qla_host * ha, int offset);
53void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
54struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
55int qla4xxx_add_sess(struct ddb_entry *);
56void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
57int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
58 uint16_t fw_ddb_index,
59 uint16_t connection_id,
60 uint16_t option);
61int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
62 uint16_t fw_ddb_index);
63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
64int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
65void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
66 uint32_t intr_status);
67int qla4xxx_init_rings(struct scsi_qla_host * ha);
68void qla4xxx_dump_buffer(void *b, uint32_t size);
69struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
70void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
71int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
72int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
73 uint32_t fw_ddb_index, uint32_t state);
74
75extern int extended_error_logging;
76extern int ql4xdiscoverywait;
77extern int ql4xdontresethba;
78#endif /* _QLA4x_GBL_H */
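
The three externs at the bottom imply matching definitions and module_param() hooks elsewhere in the driver (not shown in this hunk). A hedged sketch of what such definitions could look like, with permissions and help text purely illustrative:

int ql4xdiscoverywait;
module_param(ql4xdiscoverywait, int, S_IRUGO);
MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time, in seconds");

int extended_error_logging;	/* 2 enables DEBUG2(), 3 enables DEBUG3() */
module_param(extended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(extended_error_logging, "Extended error logging level");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba, "Don't reset the HBA on error");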
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
new file mode 100644
index 00000000000..bb3a1c11f44
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -0,0 +1,1340 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10/*
11 * QLogic ISP4xxx Hardware Support Function Prototypes.
12 */
13
14static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
15{
16 uint32_t value;
17 uint8_t func_number;
18 unsigned long flags;
19
20 /* Get the function number */
21 spin_lock_irqsave(&ha->hardware_lock, flags);
22 value = readw(&ha->reg->ctrl_status);
23 spin_unlock_irqrestore(&ha->hardware_lock, flags);
24
25 func_number = (uint8_t) ((value >> 4) & 0x30);
26 switch (value & ISP_CONTROL_FN_MASK) {
27 case ISP_CONTROL_FN0_SCSI:
28 ha->mac_index = 1;
29 break;
30 case ISP_CONTROL_FN1_SCSI:
31 ha->mac_index = 3;
32 break;
33 default:
34 DEBUG2(printk("scsi%ld: %s: Invalid function number, "
35 "ispControlStatus = 0x%x\n", ha->host_no,
36 __func__, value));
37 break;
38 }
39 DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
40 ha->mac_index));
41}
42
43/**
44 * qla4xxx_free_ddb - deallocate ddb
45 * @ha: pointer to host adapter structure.
46 * @ddb_entry: pointer to device database entry
47 *
48 * This routine deallocates and unlinks the specified ddb_entry from the
49 * adapter's ddb list.
50 **/
51void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry)
52{
53 /* Remove device entry from list */
54 list_del_init(&ddb_entry->list);
55
56 /* Remove device pointer from index mapping arrays */
57 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
58 (struct ddb_entry *) INVALID_ENTRY;
59 ha->tot_ddbs--;
60
61 /* Free memory and scsi-ml struct for device entry */
62 qla4xxx_destroy_sess(ddb_entry);
63}
64
65/**
66 * qla4xxx_free_ddb_list - deallocate all ddbs
67 * @ha: pointer to host adapter structure.
68 *
69 * This routine deallocates and removes all devices on the specified adapter.
70 **/
71void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
72{
73 struct list_head *ptr;
74 struct ddb_entry *ddb_entry;
75
76 while (!list_empty(&ha->ddb_list)) {
77 ptr = ha->ddb_list.next;
78 /* Free memory for device entry and remove */
79 ddb_entry = list_entry(ptr, struct ddb_entry, list);
80 qla4xxx_free_ddb(ha, ddb_entry);
81 }
82}
83
84/**
85 * qla4xxx_init_rings - initialize hw queues
86 * @ha: pointer to host adapter structure.
87 *
88 * This routine initializes the internal queues for the specified adapter.
89 * The QLA4010 requires us to restart the queues at index 0.
90 * The QLA4000 doesn't care, so just default to QLA4010's requirement.
91 **/
92int qla4xxx_init_rings(struct scsi_qla_host *ha)
93{
94 unsigned long flags = 0;
95
96 /* Initialize request queue. */
97 spin_lock_irqsave(&ha->hardware_lock, flags);
98 ha->request_out = 0;
99 ha->request_in = 0;
100 ha->request_ptr = &ha->request_ring[ha->request_in];
101 ha->req_q_count = REQUEST_QUEUE_DEPTH;
102
103 /* Initialize response queue. */
104 ha->response_in = 0;
105 ha->response_out = 0;
106 ha->response_ptr = &ha->response_ring[ha->response_out];
107
108 /*
109 * Initialize DMA Shadow registers. The firmware is really supposed to
110 * take care of this, but on some uniprocessor systems, the shadow
111 * registers aren't cleared-- causing the interrupt_handler to think
112 * there are responses to be processed when there aren't.
113 */
114 ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
115 ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
116 wmb();
117
118 writel(0, &ha->reg->req_q_in);
119 writel(0, &ha->reg->rsp_q_out);
120 readl(&ha->reg->rsp_q_out);
121
122 spin_unlock_irqrestore(&ha->hardware_lock, flags);
123
124 return QLA_SUCCESS;
125}
126
127/**
128 * qla4xxx_validate_mac_address - validate adapter MAC address(es)
129 * @ha: pointer to host adapter structure.
130 *
131 **/
132static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha)
133{
134 struct flash_sys_info *sys_info;
135 dma_addr_t sys_info_dma;
136 int status = QLA_ERROR;
137
138 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
139 &sys_info_dma, GFP_KERNEL);
140 if (sys_info == NULL) {
141 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
142 ha->host_no, __func__));
143
144 goto exit_validate_mac_no_free;
145 }
146 memset(sys_info, 0, sizeof(*sys_info));
147
148 /* Get flash sys info */
149 if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
150 sizeof(*sys_info)) != QLA_SUCCESS) {
151 DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
152 "failed\n", ha->host_no, __func__));
153
154 goto exit_validate_mac;
155 }
156
157 /* Save M.A.C. address & serial_number */
158 memcpy(ha->my_mac, &sys_info->physAddr[0].address[0],
159 min(sizeof(ha->my_mac),
160 sizeof(sys_info->physAddr[0].address)));
161 memcpy(ha->serial_number, &sys_info->acSerialNumber,
162 min(sizeof(ha->serial_number),
163 sizeof(sys_info->acSerialNumber)));
164
165 status = QLA_SUCCESS;
166
167 exit_validate_mac:
168 dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
169 sys_info_dma);
170
171 exit_validate_mac_no_free:
172 return status;
173}
174
175/**
176 * qla4xxx_init_local_data - initialize adapter specific local data
177 * @ha: pointer to host adapter structure.
178 *
179 **/
180static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
181{
182 /* Initialize AEN queue */
183 ha->aen_q_count = MAX_AEN_ENTRIES;
184
185 return qla4xxx_get_firmware_status(ha);
186}
187
188static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
189{
190 uint32_t timeout_count;
191 int ready = 0;
192
193 DEBUG2(dev_info(&ha->pdev->dev, "Waiting for Firmware Ready..\n"));
194 for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
195 timeout_count--) {
196 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
197 qla4xxx_get_dhcp_ip_address(ha);
198
199 /* Get firmware state. */
200 if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) {
201 DEBUG2(printk("scsi%ld: %s: unable to get firmware "
202 "state\n", ha->host_no, __func__));
203 break;
204
205 }
206
207 if (ha->firmware_state & FW_STATE_ERROR) {
208 DEBUG2(printk("scsi%ld: %s: an unrecoverable error has"
209 " occurred\n", ha->host_no, __func__));
210 break;
211
212 }
213 if (ha->firmware_state & FW_STATE_CONFIG_WAIT) {
214 /*
215 * The firmware has not yet been issued an Initialize
216 * Firmware command, so issue it now.
217 */
218 if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR)
219 break;
220
221 /* Go back and test for ready state - no wait. */
222 continue;
223 }
224
225 if (ha->firmware_state == FW_STATE_READY) {
226 DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n"));
227 /* The firmware is ready to process SCSI commands. */
228 DEBUG2(dev_info(&ha->pdev->dev,
229 "scsi%ld: %s: MEDIA TYPE - %s\n",
230 ha->host_no,
231 __func__, (ha->addl_fw_state &
232 FW_ADDSTATE_OPTICAL_MEDIA)
233 != 0 ? "OPTICAL" : "COPPER"));
234 DEBUG2(dev_info(&ha->pdev->dev,
235 "scsi%ld: %s: DHCP STATE Enabled "
236 "%s\n",
237 ha->host_no, __func__,
238 (ha->addl_fw_state &
239 FW_ADDSTATE_DHCP_ENABLED) != 0 ?
240 "YES" : "NO"));
241 DEBUG2(dev_info(&ha->pdev->dev,
242 "scsi%ld: %s: LINK %s\n",
243 ha->host_no, __func__,
244 (ha->addl_fw_state &
245 FW_ADDSTATE_LINK_UP) != 0 ?
246 "UP" : "DOWN"));
247 DEBUG2(dev_info(&ha->pdev->dev,
248 "scsi%ld: %s: iSNS Service "
249 "Started %s\n",
250 ha->host_no, __func__,
251 (ha->addl_fw_state &
252 FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
253 "YES" : "NO"));
254
255 ready = 1;
256 break;
257 }
258 DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
259 "seconds expired= %d\n", ha->host_no, __func__,
260 ha->firmware_state, ha->addl_fw_state,
261 timeout_count));
262 msleep(1000);
263 } /* end of for */
264
265 if (timeout_count <= 0)
266 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
267 ha->host_no, __func__));
268
269 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) {
270 DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to"
271 " grab an IP address from DHCP server\n",
272 ha->host_no, __func__));
273 ready = 1;
274 }
275
276 return ready;
277}
278
279/**
280 * qla4xxx_init_firmware - initializes the firmware.
281 * @ha: pointer to host adapter structure.
282 *
283 **/
284static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
285{
286 int status = QLA_ERROR;
287
288 dev_info(&ha->pdev->dev, "Initializing firmware..\n");
289 if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
290 DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
291 "control block\n", ha->host_no, __func__));
292 return status;
293 }
294 if (!qla4xxx_fw_ready(ha))
295 return status;
296
297 set_bit(AF_ONLINE, &ha->flags);
298 return qla4xxx_get_firmware_status(ha);
299}
300
301static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
302 uint32_t fw_ddb_index)
303{
304 struct dev_db_entry *fw_ddb_entry = NULL;
305 dma_addr_t fw_ddb_entry_dma;
306 struct ddb_entry *ddb_entry = NULL;
307 int found = 0;
308 uint32_t device_state;
309
310 /* Make sure the dma buffer is valid */
311 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
312 sizeof(*fw_ddb_entry),
313 &fw_ddb_entry_dma, GFP_KERNEL);
314 if (fw_ddb_entry == NULL) {
315 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
316 ha->host_no, __func__));
317 return NULL;
318 }
319
320 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
321 fw_ddb_entry_dma, NULL, NULL,
322 &device_state, NULL, NULL, NULL) ==
323 QLA_ERROR) {
324 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
325 "fw_ddb_index %d\n", ha->host_no, __func__,
326 fw_ddb_index));
327 return NULL;
328 }
329
330 /* Allocate DDB if not already allocated. */
331 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
332 __func__, fw_ddb_index));
333 list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
334 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName,
335 ISCSI_NAME_SIZE) == 0) {
336 found++;
337 break;
338 }
339 }
340
341 if (!found) {
342 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
343 "new ddb\n", ha->host_no, __func__,
344 fw_ddb_index));
345 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
346 }
347
348	/* Free the temporary fw ddb entry buffer */
349 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
350 fw_ddb_entry_dma);
351
352 return ddb_entry;
353}
354
355/**
356 * qla4xxx_update_ddb_entry - update driver's internal ddb
357 * @ha: pointer to host adapter structure.
358 * @ddb_entry: pointer to device database structure to be filled
359 * @fw_ddb_index: index of the ddb entry in fw ddb table
360 *
361 * This routine updates the driver's internal device database entry
362 * with information retrieved from the firmware's device database
363 * entry for the specified device. The ddb_entry->fw_ddb_index field
364 * must be initialized prior to calling this routine
365 *
366 **/
367int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
368 struct ddb_entry *ddb_entry,
369 uint32_t fw_ddb_index)
370{
371 struct dev_db_entry *fw_ddb_entry = NULL;
372 dma_addr_t fw_ddb_entry_dma;
373 int status = QLA_ERROR;
374
375 if (ddb_entry == NULL) {
376 DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
377 __func__));
378 goto exit_update_ddb;
379 }
380
381 /* Make sure the dma buffer is valid */
382 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
383 sizeof(*fw_ddb_entry),
384 &fw_ddb_entry_dma, GFP_KERNEL);
385 if (fw_ddb_entry == NULL) {
386 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
387 ha->host_no, __func__));
388
389 goto exit_update_ddb;
390 }
391
392 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
393 fw_ddb_entry_dma, NULL, NULL,
394 &ddb_entry->fw_ddb_device_state, NULL,
395 &ddb_entry->tcp_source_port_num,
396 &ddb_entry->connection_id) ==
397 QLA_ERROR) {
398 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
399 "fw_ddb_index %d\n", ha->host_no, __func__,
400 fw_ddb_index));
401
402 goto exit_update_ddb;
403 }
404
405 status = QLA_SUCCESS;
406 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID);
407 ddb_entry->task_mgmt_timeout =
408 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
409 ddb_entry->CmdSn = 0;
410 ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle);
411 ddb_entry->default_relogin_timeout =
412 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
413 ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait);
414
415 /* Update index in case it changed */
416 ddb_entry->fw_ddb_index = fw_ddb_index;
417 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
418
419 ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber);
420 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup);
421 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0],
422 min(sizeof(ddb_entry->iscsi_name),
423 sizeof(fw_ddb_entry->iscsiName)));
424 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0],
425 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr)));
426
427 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
428 ha->host_no, __func__, fw_ddb_index,
429 ddb_entry->fw_ddb_device_state, status));
430
431 exit_update_ddb:
432 if (fw_ddb_entry)
433 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
434 fw_ddb_entry, fw_ddb_entry_dma);
435
436 return status;
437}
438
439/**
440 * qla4xxx_alloc_ddb - allocate device database entry
441 * @ha: Pointer to host adapter structure.
442 * @fw_ddb_index: Firmware's device database index
443 *
444 * This routine allocates a ddb_entry, initializes some values, and
445 * inserts it into the ddb list.
446 **/
447struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
448 uint32_t fw_ddb_index)
449{
450 struct ddb_entry *ddb_entry;
451
452 DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no,
453 __func__, fw_ddb_index));
454
455 ddb_entry = qla4xxx_alloc_sess(ha);
456 if (ddb_entry == NULL) {
457 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
458 "to add fw_ddb_index [%d]\n",
459 ha->host_no, __func__, fw_ddb_index));
460 return ddb_entry;
461 }
462
463 ddb_entry->fw_ddb_index = fw_ddb_index;
464 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
465 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
466 atomic_set(&ddb_entry->relogin_timer, 0);
467 atomic_set(&ddb_entry->relogin_retry_count, 0);
468 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
469 list_add_tail(&ddb_entry->list, &ha->ddb_list);
470 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
471 ha->tot_ddbs++;
472
473 return ddb_entry;
474}
475
476/**
477 * qla4xxx_build_ddb_list - builds driver ddb list
478 * @ha: Pointer to host adapter structure.
479 *
480 * This routine searches for all valid firmware ddb entries and builds
481 * an internal ddb list. Ddbs that are considered valid are those with
482 * a device state of SESSION_ACTIVE.
483 **/
484static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
485{
486 int status = QLA_SUCCESS;
487 uint32_t fw_ddb_index = 0;
488 uint32_t next_fw_ddb_index = 0;
489 uint32_t ddb_state;
490 uint32_t conn_err, err_code;
491 struct ddb_entry *ddb_entry;
492
493 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
494 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
495 fw_ddb_index = next_fw_ddb_index) {
496 /* First, let's see if a device exists here */
497 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL,
498 &next_fw_ddb_index, &ddb_state,
499 &conn_err, NULL, NULL) ==
500 QLA_ERROR) {
501 DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
502 "fw_ddb_index %d failed", ha->host_no,
503 __func__, fw_ddb_index));
504 return QLA_ERROR;
505 }
506
507 DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
508 "next_fw_ddb_index=%d.\n", ha->host_no, __func__,
509 fw_ddb_index, ddb_state, next_fw_ddb_index));
510
511 /* Issue relogin, if necessary. */
512 if (ddb_state == DDB_DS_SESSION_FAILED ||
513 ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) {
514 /* Try and login to device */
515 DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
516 ha->host_no, __func__, fw_ddb_index));
517 err_code = ((conn_err & 0x00ff0000) >> 16);
518 if (err_code == 0x1c || err_code == 0x06) {
519 DEBUG2(printk("scsi%ld: %s send target "
520 "completed "
521 "or access denied failure\n",
522 ha->host_no, __func__));
523 } else
524 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
525 }
526
527 if (ddb_state != DDB_DS_SESSION_ACTIVE)
528 goto next_one;
529 /*
530 * if fw_ddb with session active state found,
531 * add to ddb_list
532 */
533 DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n",
534 ha->host_no, __func__, fw_ddb_index));
535
536		/* Add DDB to our internal ddb list. */
537 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
538 if (ddb_entry == NULL) {
539 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
540 "for device at fw_ddb_index %d\n",
541 ha->host_no, __func__, fw_ddb_index));
542 return QLA_ERROR;
543 }
544 /* Fill in the device structure */
545 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
546 QLA_ERROR) {
547 ha->fw_ddb_index_map[fw_ddb_index] =
548 (struct ddb_entry *)INVALID_ENTRY;
549
550
551 DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
552 "for fw_ddb_index %d.\n",
553 ha->host_no, __func__, fw_ddb_index));
554 return QLA_ERROR;
555 }
556
557next_one:
558 /* We know we've reached the last device when
559 * next_fw_ddb_index is 0 */
560 if (next_fw_ddb_index == 0)
561 break;
562 }
563
564 dev_info(&ha->pdev->dev, "DDB list done..\n");
565
566 return status;
567}
568
569struct qla4_relog_scan {
570 int halt_wait;
571 uint32_t conn_err;
572 uint32_t err_code;
573 uint32_t fw_ddb_index;
574 uint32_t next_fw_ddb_index;
575 uint32_t fw_ddb_device_state;
576};
577
578static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
579{
580 struct ddb_entry *ddb_entry;
581
582 /*
583 * Don't want to do a relogin if connection
584	 * error is 0x1c or 0x06.
585 */
586 rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
587 if (rs->err_code == 0x1c || rs->err_code == 0x06) {
588 DEBUG2(printk(
589 "scsi%ld: %s send target"
590 " completed or "
591 "access denied failure\n",
592 ha->host_no, __func__));
593 } else {
594 /* We either have a device that is in
595 * the process of relogging in or a
596 * device that is waiting to be
597 * relogged in */
598 rs->halt_wait = 0;
599
600 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
601 rs->fw_ddb_index);
602 if (ddb_entry == NULL)
603 return QLA_ERROR;
604
605 if (ddb_entry->dev_scan_wait_to_start_relogin != 0
606 && time_after_eq(jiffies,
607 ddb_entry->
608 dev_scan_wait_to_start_relogin))
609 {
610 ddb_entry->dev_scan_wait_to_start_relogin = 0;
611 qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0);
612 }
613 }
614 return QLA_SUCCESS;
615}
616
617static int qla4_scan_for_relogin(struct scsi_qla_host *ha,
618 struct qla4_relog_scan *rs)
619{
620 int error;
621
622 /* scan for relogins
623 * ----------------- */
624 for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES;
625 rs->fw_ddb_index = rs->next_fw_ddb_index) {
626 if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0,
627 NULL, &rs->next_fw_ddb_index,
628 &rs->fw_ddb_device_state,
629 &rs->conn_err, NULL, NULL)
630 == QLA_ERROR)
631 return QLA_ERROR;
632
633 if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS)
634 rs->halt_wait = 0;
635
636 if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED ||
637 rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) {
638 error = qla4_test_rdy(ha, rs);
639 if (error)
640 return error;
641 }
642
643 /* We know we've reached the last device when
644 * next_fw_ddb_index is 0 */
645 if (rs->next_fw_ddb_index == 0)
646 break;
647 }
648 return QLA_SUCCESS;
649}
650
651/**
652 * qla4xxx_devices_ready - wait for target devices to be logged in
653 * @ha: pointer to adapter structure
654 *
655 * This routine waits up to ql4xdiscoverywait seconds for devices to be
656 * logged into the F/W database during driver load time.
657 **/
658static int qla4xxx_devices_ready(struct scsi_qla_host *ha)
659{
660 int error;
661 unsigned long discovery_wtime;
662 struct qla4_relog_scan rs;
663
664 discovery_wtime = jiffies + (ql4xdiscoverywait * HZ);
665
666 DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait));
667 do {
668 /* poll for AEN. */
669 qla4xxx_get_firmware_state(ha);
670 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) {
671 /* Set time-between-relogin timer */
672 qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS);
673 }
674
675		/* if no relogins active or needed, halt discovery wait */
676 rs.halt_wait = 1;
677
678 error = qla4_scan_for_relogin(ha, &rs);
679
680 if (rs.halt_wait) {
681 DEBUG2(printk("scsi%ld: %s: Delay halted. Devices "
682 "Ready.\n", ha->host_no, __func__));
683 return QLA_SUCCESS;
684 }
685
686 msleep(2000);
687 } while (!time_after_eq(jiffies, discovery_wtime));
688
689 DEBUG3(qla4xxx_get_conn_event_log(ha));
690
691 return QLA_SUCCESS;
692}
693
694static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
695{
696 unsigned long wtime;
697
698 /* Flush the 0x8014 AEN from the firmware as a result of
699 * Auto connect. We are basically doing get_firmware_ddb()
700 * to determine whether we need to log back in or not.
701 * Trying to do a set ddb before we have processed 0x8014
702 * will result in another set_ddb() for the same ddb. In other
703 * words there will be stale entries in the aen_q.
704 */
705 wtime = jiffies + (2 * HZ);
706 do {
707 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
708 if (ha->firmware_state & (BIT_2 | BIT_0))
709 return;
710
711 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
712 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
713
714 msleep(1000);
715 } while (!time_after_eq(jiffies, wtime));
716
717}
718
719static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
720{
721 uint16_t fw_ddb_index;
722 int status = QLA_SUCCESS;
723
724	/* free the ddb list if it is not empty */
725 if (!list_empty(&ha->ddb_list))
726 qla4xxx_free_ddb_list(ha);
727
728 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
729 ha->fw_ddb_index_map[fw_ddb_index] =
730 (struct ddb_entry *)INVALID_ENTRY;
731
732 ha->tot_ddbs = 0;
733
734 qla4xxx_flush_AENS(ha);
735
736 /*
737 * First perform device discovery for active
738 * fw ddb indexes and build
739 * ddb list.
740 */
741 if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
742 return status;
743
744 /* Wait for an AEN */
745 qla4xxx_devices_ready(ha);
746
747 /*
748	 * Targets can come online after the initial discovery, so processing
749 * the aens here will catch them.
750 */
751 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
752 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
753
754 return status;
755}
756
757/**
758 * qla4xxx_reinitialize_ddb_list - update the driver ddb list
759 * @ha: pointer to host adapter structure.
760 *
761 * This routine obtains device information from the F/W database after
762 * firmware or adapter resets. The device table is preserved.
763 **/
764int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
765{
766 int status = QLA_SUCCESS;
767 struct ddb_entry *ddb_entry, *detemp;
768
769 /* Update the device information for all devices. */
770 list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) {
771 qla4xxx_update_ddb_entry(ha, ddb_entry,
772 ddb_entry->fw_ddb_index);
773 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
774 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
775 DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked "
776 "ONLINE\n", ha->host_no, __func__,
777 ddb_entry->fw_ddb_index));
778 } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
779 qla4xxx_mark_device_missing(ha, ddb_entry);
780 }
781 return status;
782}
783
784/**
785 * qla4xxx_relogin_device - re-establish session
786 * @ha: Pointer to host adapter structure.
787 * @ddb_entry: Pointer to device database entry
788 *
789 * This routine does a session relogin with the specified device.
790 * The ddb entry must be assigned prior to making this call.
791 **/
792int qla4xxx_relogin_device(struct scsi_qla_host *ha,
793 struct ddb_entry * ddb_entry)
794{
795 uint16_t relogin_timer;
796
797 relogin_timer = max(ddb_entry->default_relogin_timeout,
798 (uint16_t)RELOGIN_TOV);
799 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
800
801 DEBUG2(printk("scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
802 ddb_entry->fw_ddb_index, relogin_timer));
803
804 qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
805
806 return QLA_SUCCESS;
807}
808
809/**
810 * qla4010_get_topcat_presence - check if it is QLA4040 TopCat Chip
811 * @ha: Pointer to host adapter structure.
812 *
813 **/
814static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
815{
816 unsigned long flags;
817 uint16_t topcat;
818
819 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
820 return QLA_ERROR;
821 spin_lock_irqsave(&ha->hardware_lock, flags);
822 topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
823 isp4010.topcat));
824 spin_unlock_irqrestore(&ha->hardware_lock, flags);
825
826 if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
827 set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
828 else
829 clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
830 ql4xxx_unlock_nvram(ha);
831 return QLA_SUCCESS;
832}
833
834
835static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
836{
837 unsigned long flags;
838 union external_hw_config_reg extHwConfig;
839
840 DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
841 __func__));
842 if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
843 return (QLA_ERROR);
844 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
845 ql4xxx_unlock_flash(ha);
846 return (QLA_ERROR);
847 }
848
849 /* Get EEPRom Parameters from NVRAM and validate */
850 dev_info(&ha->pdev->dev, "Configuring NVRAM ...\n");
851 if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
852 spin_lock_irqsave(&ha->hardware_lock, flags);
853 extHwConfig.Asuint32_t =
854 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
856 } else {
857 /*
858 * QLogic adapters should always have a valid NVRAM.
859 * If not valid, do not load.
860 */
861 dev_warn(&ha->pdev->dev,
862 "scsi%ld: %s: EEProm checksum invalid. "
863 "Please update your EEPROM\n", ha->host_no,
864 __func__);
865
866 /* set defaults */
867 if (is_qla4010(ha))
868 extHwConfig.Asuint32_t = 0x1912;
869 else if (is_qla4022(ha))
870 extHwConfig.Asuint32_t = 0x0023;
871 }
872 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
873 ha->host_no, __func__, extHwConfig.Asuint32_t));
874
875 spin_lock_irqsave(&ha->hardware_lock, flags);
876 writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha));
877 readl(isp_ext_hw_conf(ha));
878 spin_unlock_irqrestore(&ha->hardware_lock, flags);
879
880 ql4xxx_unlock_nvram(ha);
881 ql4xxx_unlock_flash(ha);
882
883 return (QLA_SUCCESS);
884}
885
886static void qla4x00_pci_config(struct scsi_qla_host *ha)
887{
888 uint16_t w, mwi;
889
890 dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
891
892 pci_set_master(ha->pdev);
893 mwi = 0;
894 if (pci_set_mwi(ha->pdev))
895 mwi = PCI_COMMAND_INVALIDATE;
896 /*
897 * We want to respect framework's setting of PCI configuration space
898 * command register and also want to make sure that all bits of
899 * interest to us are properly set in command register.
900 */
901 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
902 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
903 w &= ~PCI_COMMAND_INTX_DISABLE;
904 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
905}
906
907static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
908{
909 int status = QLA_ERROR;
910 uint32_t max_wait_time;
911 unsigned long flags;
912 uint32_t mbox_status;
913
914 dev_info(&ha->pdev->dev, "Starting firmware ...\n");
915
916 /*
917 * Start firmware from flash ROM
918 *
919 * WORKAROUND: Stuff a non-constant value that the firmware can
920 * use as a seed for a random number generator in MB7 prior to
921 * setting BOOT_ENABLE. Fixes problem where the TCP
922 * connections use the same TCP ports after each reboot,
923 * causing some connections to not get re-established.
924 */
925	DEBUG(printk("scsi%ld: %s: Start firmware from flash ROM\n",
926 ha->host_no, __func__));
927
928 spin_lock_irqsave(&ha->hardware_lock, flags);
929 writel(jiffies, &ha->reg->mailbox[7]);
930 if (is_qla4022(ha))
931 writel(set_rmask(NVR_WRITE_ENABLE),
932 &ha->reg->u1.isp4022.nvram);
933
934 writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
935 readl(&ha->reg->ctrl_status);
936 spin_unlock_irqrestore(&ha->hardware_lock, flags);
937
938 /* Wait for firmware to come UP. */
939 max_wait_time = FIRMWARE_UP_TOV * 4;
940 do {
941 uint32_t ctrl_status;
942
943 spin_lock_irqsave(&ha->hardware_lock, flags);
944 ctrl_status = readw(&ha->reg->ctrl_status);
945 mbox_status = readw(&ha->reg->mailbox[0]);
946 spin_unlock_irqrestore(&ha->hardware_lock, flags);
947
948 if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR))
949 break;
950 if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
951 break;
952
953 DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to "
954 "complete... ctrl_sts=0x%x, remaining=%d\n",
955 ha->host_no, __func__, ctrl_status,
956 max_wait_time));
957
958 msleep(250);
959 } while ((max_wait_time--));
960
961 if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
962 DEBUG(printk("scsi%ld: %s: Firmware has started\n",
963 ha->host_no, __func__));
964
965 spin_lock_irqsave(&ha->hardware_lock, flags);
966 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
967 &ha->reg->ctrl_status);
968 readl(&ha->reg->ctrl_status);
969 spin_unlock_irqrestore(&ha->hardware_lock, flags);
970
971 status = QLA_SUCCESS;
972 } else {
973 printk(KERN_INFO "scsi%ld: %s: Boot firmware failed "
974 "- mbox status 0x%x\n", ha->host_no, __func__,
975 mbox_status);
976 status = QLA_ERROR;
977 }
978 return status;
979}
980
981static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
982{
983#define QL4_LOCK_DRVR_WAIT 300
984#define QL4_LOCK_DRVR_SLEEP 100
985
986 int drvr_wait = QL4_LOCK_DRVR_WAIT;
987 while (drvr_wait) {
988 if (ql4xxx_lock_drvr(a) == 0) {
989 msleep(QL4_LOCK_DRVR_SLEEP);
990 if (drvr_wait) {
991 DEBUG2(printk("scsi%ld: %s: Waiting for "
992				      "Global Init Semaphore...\n",
993 a->host_no,
994 __func__));
995 }
996 drvr_wait -= QL4_LOCK_DRVR_SLEEP;
997 } else {
998 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
999			      "acquired.\n", a->host_no, __func__));
1000 return QLA_SUCCESS;
1001 }
1002 }
1003 return QLA_ERROR;
1004}
1005
1006/**
1007 * qla4xxx_start_firmware - starts qla4xxx firmware
1008 * @ha: Pointer to host adapter structure.
1009 *
1010 * This routine performs the necessary steps to start the firmware for
1011 * the QLA4010 adapter.
1012 **/
1013static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1014{
1015 unsigned long flags = 0;
1016 uint32_t mbox_status;
1017 int status = QLA_ERROR;
1018 int soft_reset = 1;
1019 int config_chip = 0;
1020
1021 if (is_qla4010(ha)){
1022 if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
1023 return QLA_ERROR;
1024 }
1025
1026 if (is_qla4022(ha))
1027 ql4xxx_set_mac_number(ha);
1028
1029 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1030 return QLA_ERROR;
1031
1032 spin_lock_irqsave(&ha->hardware_lock, flags);
1033
1034 DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no,
1035 __func__, readw(isp_port_ctrl(ha))));
1036 DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no,
1037 __func__, readw(isp_port_status(ha))));
1038
1039 /* Is Hardware already initialized? */
1040 if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) {
1041 DEBUG(printk("scsi%ld: %s: Hardware has already been "
1042 "initialized\n", ha->host_no, __func__));
1043
1044 /* Receive firmware boot acknowledgement */
1045 mbox_status = readw(&ha->reg->mailbox[0]);
1046
1047 DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= "
1048 "0x%x\n", ha->host_no, __func__, mbox_status));
1049
1050 /* Is firmware already booted? */
1051 if (mbox_status == 0) {
1052			/* F/W not running, must be configured by net driver */
1053 config_chip = 1;
1054 soft_reset = 0;
1055 } else {
1056 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
1057 &ha->reg->ctrl_status);
1058 readl(&ha->reg->ctrl_status);
1059 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1060 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
1061 DEBUG2(printk("scsi%ld: %s: Get firmware "
1062 "state -- state = 0x%x\n",
1063 ha->host_no,
1064 __func__, ha->firmware_state));
1065 /* F/W is running */
1066 if (ha->firmware_state &
1067 FW_STATE_CONFIG_WAIT) {
1068 DEBUG2(printk("scsi%ld: %s: Firmware "
1069 "in known state -- "
1070 "config and "
1071 "boot, state = 0x%x\n",
1072 ha->host_no, __func__,
1073 ha->firmware_state));
1074 config_chip = 1;
1075 soft_reset = 0;
1076 }
1077 } else {
1078 DEBUG2(printk("scsi%ld: %s: Firmware in "
1079 "unknown state -- resetting,"
1080 " state = "
1081 "0x%x\n", ha->host_no, __func__,
1082 ha->firmware_state));
1083 }
1084 spin_lock_irqsave(&ha->hardware_lock, flags);
1085 }
1086 } else {
1087 DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been "
1088 "started - resetting\n", ha->host_no, __func__));
1089 }
1090 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1091
1092	DEBUG(printk("scsi%ld: %s: Flags soft_reset=%d, config=%d\n",
1093 ha->host_no, __func__, soft_reset, config_chip));
1094 if (soft_reset) {
1095 DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
1096 __func__));
1097 status = qla4xxx_soft_reset(ha);
1098 if (status == QLA_ERROR) {
1099			DEBUG(printk("scsi%ld: %s: Soft Reset failed!\n",
1100 ha->host_no, __func__));
1101 ql4xxx_unlock_drvr(ha);
1102 return QLA_ERROR;
1103 }
1104 config_chip = 1;
1105
1106		/* Reset clears the semaphore, so acquire again */
1107 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1108 return QLA_ERROR;
1109 }
1110
1111 if (config_chip) {
1112 if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS)
1113 status = qla4xxx_start_firmware_from_flash(ha);
1114 }
1115
1116 ql4xxx_unlock_drvr(ha);
1117 if (status == QLA_SUCCESS) {
1118 qla4xxx_get_fw_version(ha);
1119 if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
1120 qla4xxx_get_crash_record(ha);
1121 } else {
1122 DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
1123 ha->host_no, __func__));
1124 }
1125 return status;
1126}
1127
1128
1129/**
1130 * qla4xxx_initialize_adapter - initializes hba
1131 * @ha: Pointer to host adapter structure.
1132 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
1133 * after adapter recovery has completed.
1134 * 0=preserve ddb list, 1=destroy and rebuild ddb list
1135 *
1136 * This routine performs all of the steps necessary to initialize the adapter.
1137 *
1138 **/
1139int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1140 uint8_t renew_ddb_list)
1141{
1142 int status = QLA_ERROR;
1143	int8_t ip_address[IP_ADDR_LEN] = {0};
1144
1145 ha->eeprom_cmd_data = 0;
1146
1147 qla4x00_pci_config(ha);
1148
1149 qla4xxx_disable_intrs(ha);
1150
1151 /* Initialize the Host adapter request/response queues and firmware */
1152 if (qla4xxx_start_firmware(ha) == QLA_ERROR)
1153 return status;
1154
1155 if (qla4xxx_validate_mac_address(ha) == QLA_ERROR)
1156 return status;
1157
1158 if (qla4xxx_init_local_data(ha) == QLA_ERROR)
1159 return status;
1160
1161 status = qla4xxx_init_firmware(ha);
1162 if (status == QLA_ERROR)
1163 return status;
1164
1165 /*
1166 * FW is waiting to get an IP address from DHCP server: Skip building
1167 * the ddb_list and wait for DHCP lease acquired aen to come in
1168	 * followed by 0x8014 aen to trigger the tgt discovery process.
1169 */
1170 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
1171 return status;
1172
1173 /* Skip device discovery if ip and subnet is zero */
1174 if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
1175 memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
1176 return status;
1177
1178 if (renew_ddb_list == PRESERVE_DDB_LIST) {
1179 /*
1180 * We want to preserve lun states (i.e. suspended, etc.)
1181 * for recovery initiated by the driver. So just update
1182 * the device states for the existing ddb_list.
1183 */
1184 qla4xxx_reinitialize_ddb_list(ha);
1185 } else if (renew_ddb_list == REBUILD_DDB_LIST) {
1186 /*
1187 * We want to build the ddb_list from scratch during
1188 * driver initialization and recovery initiated by the
1189 * INT_HBA_RESET IOCTL.
1190 */
1191 status = qla4xxx_initialize_ddb_list(ha);
1192 if (status == QLA_ERROR) {
1193			DEBUG2(printk("%s(%ld) Error occurred during build "
1194 "ddb list\n", __func__, ha->host_no));
1195 goto exit_init_hba;
1196 }
1197
1198 }
1199 if (!ha->tot_ddbs) {
1200 DEBUG2(printk("scsi%ld: Failed to initialize devices or none "
1201 "present in Firmware device database\n",
1202 ha->host_no));
1203 }
1204
1205 exit_init_hba:
1206 return status;
1207
1208}
1209
1210/**
1211 * qla4xxx_add_device_dynamically - ddb addition due to an AEN
1212 * @ha: Pointer to host adapter structure.
1213 * @fw_ddb_index: Firmware's device database index
1214 *
1215 * This routine adds a device as a result of an 8014h AEN.
1216 **/
1217static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1218 uint32_t fw_ddb_index)
1219{
1220 struct ddb_entry * ddb_entry;
1221
1222 /* First allocate a device structure */
1223 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
1224 if (ddb_entry == NULL) {
1225 DEBUG2(printk(KERN_WARNING
1226 "scsi%ld: Unable to allocate memory to add "
1227 "fw_ddb_index %d\n", ha->host_no, fw_ddb_index));
1228 return;
1229 }
1230
1231 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
1232 QLA_ERROR) {
1233 ha->fw_ddb_index_map[fw_ddb_index] =
1234 (struct ddb_entry *)INVALID_ENTRY;
1235 DEBUG2(printk(KERN_WARNING
1236 "scsi%ld: failed to add new device at index "
1237 "[%d]\n Unable to retrieve fw ddb entry\n",
1238 ha->host_no, fw_ddb_index));
1239 qla4xxx_free_ddb(ha, ddb_entry);
1240 return;
1241 }
1242
1243 if (qla4xxx_add_sess(ddb_entry)) {
1244 DEBUG2(printk(KERN_WARNING
1245 "scsi%ld: failed to add new device at index "
1246 "[%d]\n Unable to add connection and session\n",
1247 ha->host_no, fw_ddb_index));
1248 qla4xxx_free_ddb(ha, ddb_entry);
1249 }
1250}
1251
1252/**
1253 * qla4xxx_process_ddb_changed - process ddb state change
1254 * @ha: Pointer to host adapter structure.
1255 * @fw_ddb_index: Firmware's device database index
1256 * @state: Device state
1257 *
1258 * This routine processes a Device Database Changed AEN Event.
1259 **/
1260int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1261 uint32_t fw_ddb_index, uint32_t state)
1262{
1263 struct ddb_entry * ddb_entry;
1264 uint32_t old_fw_ddb_device_state;
1265
1266 /* check for out of range index */
1267 if (fw_ddb_index >= MAX_DDB_ENTRIES)
1268 return QLA_ERROR;
1269
1270	/* Get the corresponding ddb entry */
1271 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
1272 /* Device does not currently exist in our database. */
1273 if (ddb_entry == NULL) {
1274 if (state == DDB_DS_SESSION_ACTIVE)
1275 qla4xxx_add_device_dynamically(ha, fw_ddb_index);
1276 return QLA_SUCCESS;
1277 }
1278
1279 /* Device already exists in our database. */
1280 old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
1281 DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
1282 "index [%d]\n", ha->host_no, __func__,
1283 ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
1284 if (old_fw_ddb_device_state == state &&
1285 state == DDB_DS_SESSION_ACTIVE) {
1286 /* Do nothing, state not changed. */
1287 return QLA_SUCCESS;
1288 }
1289
1290 ddb_entry->fw_ddb_device_state = state;
1291 /* Device is back online. */
1292 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
1293 atomic_set(&ddb_entry->port_down_timer,
1294 ha->port_down_retry_count);
1295 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1296 atomic_set(&ddb_entry->relogin_retry_count, 0);
1297 atomic_set(&ddb_entry->relogin_timer, 0);
1298 clear_bit(DF_RELOGIN, &ddb_entry->flags);
1299 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
1300 iscsi_if_create_session_done(ddb_entry->conn);
1301 /*
1302		 * Change the lun state to READY in case the lun timed out before
1303 * the device came back.
1304 */
1305 } else {
1306 /* Device went away, try to relogin. */
1307 /* Mark device missing */
1308 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
1309 qla4xxx_mark_device_missing(ha, ddb_entry);
1310 /*
1311 * Relogin if device state changed to a not active state.
1312 * However, do not relogin if this aen is a result of an IOCTL
1313 * logout (DF_NO_RELOGIN) or if this is a discovered device.
1314 */
1315 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
1316 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
1317 !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
1318 !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) {
1319 /*
1320 * This triggers a relogin. After the relogin_timer
1321 * expires, the relogin gets scheduled. We must wait a
1322 * minimum amount of time since receiving an 0x8014 AEN
1323 * with failed device_state or a logout response before
1324 * we can issue another relogin.
1325 */
1326			/* Firmware pads this timeout: (time2wait + 1).
1327			 * The driver's relogin retry interval should be longer
1328			 * than the firmware's; otherwise the firmware will fail
1329			 * the set_ddb() mbx cmd with 0x4005 since it is still
1330			 * counting down its time2wait.
1331 */
1332 atomic_set(&ddb_entry->relogin_timer, 0);
1333 atomic_set(&ddb_entry->retry_relogin_timer,
1334 ddb_entry->default_time2wait + 4);
1335 }
1336 }
1337
1338 return QLA_SUCCESS;
1339}
1340
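Both qla4xxx_build_ddb_list() and qla4_test_rdy() in the file above pull an 8-bit error class out of bits 23:16 of the connection-error word before deciding whether a relogin is worth scheduling. A minimal sketch of that check, factored into a hypothetical helper that is not part of the merged driver (it only assumes the kernel integer types already used here):

/*
 * Illustrative only -- mirrors the open-coded
 * "(conn_err & 0x00ff0000) >> 16" test used in ql4_init.c. Error
 * classes 0x1c and 0x06 (logged above as "send target completed or
 * access denied failure") are not retried with a relogin.
 */
static inline int qla4_conn_err_wants_relogin(uint32_t conn_err)
{
	uint32_t err_code = (conn_err & 0x00ff0000) >> 16;

	return err_code != 0x1c && err_code != 0x06;
}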
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
new file mode 100644
index 00000000000..0d61797af7d
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -0,0 +1,84 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8/*
9 *
10 * qla4xxx_lookup_ddb_by_fw_index
11 * This routine locates a device handle given the firmware device
12 * database index. If device doesn't exist, returns NULL.
13 *
14 * Input:
15 * ha - Pointer to host adapter structure.
16 * fw_ddb_index - Firmware's device database index
17 *
18 * Returns:
19 * Pointer to the corresponding internal device database structure
20 */
21static inline struct ddb_entry *
22qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
23{
24 struct ddb_entry *ddb_entry = NULL;
25
26 if ((fw_ddb_index < MAX_DDB_ENTRIES) &&
27 (ha->fw_ddb_index_map[fw_ddb_index] !=
28 (struct ddb_entry *) INVALID_ENTRY)) {
29 ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
30 }
31
32	DEBUG3(printk("scsi%ld: %s: index [%d], ddb_entry = %p\n",
33 ha->host_no, __func__, fw_ddb_index, ddb_entry));
34
35 return ddb_entry;
36}
37
38static inline void
39__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
40{
41 if (is_qla4022(ha)) {
42 writel(set_rmask(IMR_SCSI_INTR_ENABLE),
43 &ha->reg->u1.isp4022.intr_mask);
44 readl(&ha->reg->u1.isp4022.intr_mask);
45 } else {
46 writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
47 readl(&ha->reg->ctrl_status);
48 }
49 set_bit(AF_INTERRUPTS_ON, &ha->flags);
50}
51
52static inline void
53__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
54{
55 if (is_qla4022(ha)) {
56 writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
57 &ha->reg->u1.isp4022.intr_mask);
58 readl(&ha->reg->u1.isp4022.intr_mask);
59 } else {
60 writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
61 readl(&ha->reg->ctrl_status);
62 }
63 clear_bit(AF_INTERRUPTS_ON, &ha->flags);
64}
65
66static inline void
67qla4xxx_enable_intrs(struct scsi_qla_host *ha)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(&ha->hardware_lock, flags);
72 __qla4xxx_enable_intrs(ha);
73 spin_unlock_irqrestore(&ha->hardware_lock, flags);
74}
75
76static inline void
77qla4xxx_disable_intrs(struct scsi_qla_host *ha)
78{
79 unsigned long flags;
80
81 spin_lock_irqsave(&ha->hardware_lock, flags);
82 __qla4xxx_disable_intrs(ha);
83 spin_unlock_irqrestore(&ha->hardware_lock, flags);
84}
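The __qla4xxx_enable_intrs()/__qla4xxx_disable_intrs() helpers above expect ha->hardware_lock to already be held, while the underscore-less wrappers take the lock themselves. A hedged usage sketch follows; the caller below is hypothetical and only illustrates that locking contract:

/* Hypothetical caller -- shows which variant fits which context. */
static void example_toggle_intrs(struct scsi_qla_host *ha)
{
	unsigned long flags;

	/* Ordinary (unlocked) context: use the locking wrapper. */
	qla4xxx_disable_intrs(ha);

	/* Context that already holds hardware_lock: use the
	 * double-underscore variant to avoid taking the lock twice. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	__qla4xxx_enable_intrs(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}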
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
new file mode 100644
index 00000000000..c0a254b89a3
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -0,0 +1,368 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10#include <scsi/scsi_tcq.h>
11
12/**
13 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
14 * @ha: Pointer to host adapter structure.
15 * @queue_entry: Pointer to pointer to queue entry structure
16 *
17 * This routine performs the following tasks:
18 * - returns the current request_in pointer (if queue not full)
19 * - advances the request_in pointer
20 * - checks for queue full
21 **/
22int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
23 struct queue_entry **queue_entry)
24{
25 uint16_t request_in;
26 uint8_t status = QLA_SUCCESS;
27
28 *queue_entry = ha->request_ptr;
29
30 /* get the latest request_in and request_out index */
31 request_in = ha->request_in;
32 ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
33
34 /* Advance request queue pointer and check for queue full */
35 if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
36 request_in = 0;
37 ha->request_ptr = ha->request_ring;
38 } else {
39 request_in++;
40 ha->request_ptr++;
41 }
42
43 /* request queue is full, try again later */
44 if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
45 /* restore request pointer */
46 ha->request_ptr = *queue_entry;
47 status = QLA_ERROR;
48 } else {
49 ha->request_in = request_in;
50 memset(*queue_entry, 0, sizeof(**queue_entry));
51 }
52
53 return status;
54}
55
56/**
57 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
58 * @ha: Pointer to host adapter structure.
59 * @ddb_entry: Pointer to device database entry
60 * @lun: SCSI LUN
62 *
63 * This routine issues a marker IOCB.
64 **/
65int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
66 struct ddb_entry *ddb_entry, int lun)
67{
68 struct marker_entry *marker_entry;
69 unsigned long flags = 0;
70 uint8_t status = QLA_SUCCESS;
71
72 /* Acquire hardware specific lock */
73 spin_lock_irqsave(&ha->hardware_lock, flags);
74
75 /* Get pointer to the queue entry for the marker */
76 if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
77 QLA_SUCCESS) {
78 status = QLA_ERROR;
79 goto exit_send_marker;
80 }
81
82 /* Put the marker in the request queue */
83 marker_entry->hdr.entryType = ET_MARKER;
84 marker_entry->hdr.entryCount = 1;
85 marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
86 marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
87 int_to_scsilun(lun, &marker_entry->lun);
88 wmb();
89
90 /* Tell ISP it's got a new I/O request */
91 writel(ha->request_in, &ha->reg->req_q_in);
92 readl(&ha->reg->req_q_in);
93
94exit_send_marker:
95 spin_unlock_irqrestore(&ha->hardware_lock, flags);
96 return status;
97}
98
99struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
100 struct scsi_qla_host *ha)
101{
102 struct continuation_t1_entry *cont_entry;
103
104 cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
105
106 /* Advance request queue pointer */
107 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
108 ha->request_in = 0;
109 ha->request_ptr = ha->request_ring;
110 } else {
111 ha->request_in++;
112 ha->request_ptr++;
113 }
114
115 /* Load packet defaults */
116 cont_entry->hdr.entryType = ET_CONTINUE;
117 cont_entry->hdr.entryCount = 1;
118 cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
119
120 return cont_entry;
121}
122
123uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
124{
125 uint16_t iocbs;
126
127 iocbs = 1;
128 if (dsds > COMMAND_SEG) {
129 iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
130 if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
131 iocbs++;
132 }
133 return iocbs;
134}
135
136void qla4xxx_build_scsi_iocbs(struct srb *srb,
137 struct command_t3_entry *cmd_entry,
138 uint16_t tot_dsds)
139{
140 struct scsi_qla_host *ha;
141 uint16_t avail_dsds;
142 struct data_seg_a64 *cur_dsd;
143 struct scsi_cmnd *cmd;
144
145 cmd = srb->cmd;
146 ha = srb->ha;
147
148 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
149 /* No data being transferred */
150 cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
151 return;
152 }
153
154 avail_dsds = COMMAND_SEG;
155 cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
156
157 /* Load data segments */
158 if (cmd->use_sg) {
159 struct scatterlist *cur_seg;
160 struct scatterlist *end_seg;
161
162 cur_seg = (struct scatterlist *)cmd->request_buffer;
163 end_seg = cur_seg + tot_dsds;
164 while (cur_seg < end_seg) {
165 dma_addr_t sle_dma;
166
167 /* Allocate additional continuation packets? */
168 if (avail_dsds == 0) {
169 struct continuation_t1_entry *cont_entry;
170
171 cont_entry = qla4xxx_alloc_cont_entry(ha);
172 cur_dsd =
173 (struct data_seg_a64 *)
174 &cont_entry->dataseg[0];
175 avail_dsds = CONTINUE_SEG;
176 }
177
178 sle_dma = sg_dma_address(cur_seg);
179 cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
180 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
181 cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
182 avail_dsds--;
183
184 cur_dsd++;
185 cur_seg++;
186 }
187 } else {
188 cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
189 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
190 cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
191 }
192}
193
194/**
195 * qla4xxx_send_command_to_isp - issues command to HBA
196 * @ha: pointer to host adapter structure.
197 * @srb: pointer to SCSI Request Block to be sent to ISP
198 *
199 * This routine is called by qla4xxx_queuecommand to build an ISP
200 * command and pass it to the ISP for execution.
201 **/
202int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
203{
204 struct scsi_cmnd *cmd = srb->cmd;
205 struct ddb_entry *ddb_entry;
206 struct command_t3_entry *cmd_entry;
207 struct scatterlist *sg = NULL;
208
209 uint16_t tot_dsds;
210 uint16_t req_cnt;
211
212 unsigned long flags;
213 uint16_t cnt;
214 uint32_t index;
215 char tag[2];
216
217 /* Get real lun and adapter */
218 ddb_entry = srb->ddb;
219
220 /* Send marker(s) if needed. */
221 if (ha->marker_needed == 1) {
222 if (qla4xxx_send_marker_iocb(ha, ddb_entry,
223 cmd->device->lun) != QLA_SUCCESS)
224 return QLA_ERROR;
225
226 ha->marker_needed = 0;
227 }
228 tot_dsds = 0;
229
230 /* Acquire hardware specific lock */
231 spin_lock_irqsave(&ha->hardware_lock, flags);
232
233 index = (uint32_t)cmd->request->tag;
234
235 /* Calculate the number of request entries needed. */
236 if (cmd->use_sg) {
237 sg = (struct scatterlist *)cmd->request_buffer;
238 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
239 cmd->sc_data_direction);
240 if (tot_dsds == 0)
241 goto queuing_error;
242 } else if (cmd->request_bufflen) {
243 dma_addr_t req_dma;
244
245 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
246 cmd->request_bufflen,
247 cmd->sc_data_direction);
248 if (dma_mapping_error(req_dma))
249 goto queuing_error;
250
251 srb->dma_handle = req_dma;
252 tot_dsds = 1;
253 }
254 req_cnt = qla4xxx_calc_request_entries(tot_dsds);
255
256 if (ha->req_q_count < (req_cnt + 2)) {
257 cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
258 if (ha->request_in < cnt)
259 ha->req_q_count = cnt - ha->request_in;
260 else
261 ha->req_q_count = REQUEST_QUEUE_DEPTH -
262 (ha->request_in - cnt);
263 }
264
265 if (ha->req_q_count < (req_cnt + 2))
266 goto queuing_error;
267
268 /* total iocbs active */
269 if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
270 goto queuing_error;
271
272 /* Build command packet */
273 cmd_entry = (struct command_t3_entry *) ha->request_ptr;
274 memset(cmd_entry, 0, sizeof(struct command_t3_entry));
275 cmd_entry->hdr.entryType = ET_COMMAND;
276 cmd_entry->handle = cpu_to_le32(index);
277 cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
278 cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);
279
280 int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
281 cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
282 cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
283 memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
284 cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
285 cmd_entry->hdr.entryCount = req_cnt;
286
287 /* Set data transfer direction control flags
288 * NOTE: Look at data_direction bits iff there is data to be
289 * transferred, as the data direction bit is sometimed filled
290 * in when there is no data to be transferred */
291 cmd_entry->control_flags = CF_NO_DATA;
292 if (cmd->request_bufflen) {
293 if (cmd->sc_data_direction == DMA_TO_DEVICE)
294 cmd_entry->control_flags = CF_WRITE;
295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
296 cmd_entry->control_flags = CF_READ;
297 }
298
299 /* Set tagged queueing control flags */
300 cmd_entry->control_flags |= CF_SIMPLE_TAG;
301 if (scsi_populate_tag_msg(cmd, tag))
302 switch (tag[0]) {
303 case MSG_HEAD_TAG:
304 cmd_entry->control_flags |= CF_HEAD_TAG;
305 break;
306 case MSG_ORDERED_TAG:
307 cmd_entry->control_flags |= CF_ORDERED_TAG;
308 break;
309 }
310
311
312 /* Advance request queue pointer */
313 ha->request_in++;
314 if (ha->request_in == REQUEST_QUEUE_DEPTH) {
315 ha->request_in = 0;
316 ha->request_ptr = ha->request_ring;
317 } else
318 ha->request_ptr++;
319
320
321 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
322 wmb();
323
324 /*
325 * Check to see if adapter is online before placing request on
326 * request queue. If a reset occurs and a request is in the queue,
327 * the firmware will still attempt to process the request, retrieving
328 * garbage for pointers.
329 */
330 if (!test_bit(AF_ONLINE, &ha->flags)) {
331 DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
332 "Do not issue command.\n",
333 ha->host_no, __func__));
334 goto queuing_error;
335 }
336
337 srb->cmd->host_scribble = (unsigned char *)srb;
338
339 /* update counters */
340 srb->state = SRB_ACTIVE_STATE;
341 srb->flags |= SRB_DMA_VALID;
342
343 /* Track IOCB used */
344 ha->iocb_cnt += req_cnt;
345 srb->iocb_cnt = req_cnt;
346 ha->req_q_count -= req_cnt;
347
348	/* Tell ISP it's got a new I/O request */
349 writel(ha->request_in, &ha->reg->req_q_in);
350 readl(&ha->reg->req_q_in);
351 spin_unlock_irqrestore(&ha->hardware_lock, flags);
352
353 return QLA_SUCCESS;
354
355queuing_error:
356
357 if (cmd->use_sg && tot_dsds) {
358 sg = (struct scatterlist *) cmd->request_buffer;
359 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
360 cmd->sc_data_direction);
361 } else if (tot_dsds)
362 pci_unmap_single(ha->pdev, srb->dma_handle,
363 cmd->request_bufflen, cmd->sc_data_direction);
364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
365
366 return QLA_ERROR;
367}
368
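For reference, qla4xxx_calc_request_entries() above is a ceiling division: the command IOCB carries COMMAND_SEG data segments and each continuation IOCB carries CONTINUE_SEG more. An equivalent, illustrative form of the same arithmetic is sketched below (the helper name is hypothetical and not part of the driver):

/* Illustrative only -- same result as qla4xxx_calc_request_entries(). */
static uint16_t calc_request_entries_alt(uint16_t dsds)
{
	if (dsds <= COMMAND_SEG)
		return 1;	/* everything fits in the command IOCB */

	/* one command IOCB plus ceil((dsds - COMMAND_SEG) / CONTINUE_SEG)
	 * continuation IOCBs for the remaining segments */
	return 1 + (dsds - COMMAND_SEG + CONTINUE_SEG - 1) / CONTINUE_SEG;
}

With dsds equal to COMMAND_SEG + 1, for example, both forms return 2.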
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
new file mode 100644
index 00000000000..b584317608d
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -0,0 +1,797 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10/**
11 * qla4xxx_process_completed_request() - Process a Fast Post response.
12 * @ha: SCSI driver HA context
13 * @index: SRB index
14 **/
15static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
16 uint32_t index)
17{
18 struct srb *srb;
19
20 srb = qla4xxx_del_from_active_array(ha, index);
21 if (srb) {
22 /* Save ISP completion status */
23 srb->cmd->result = DID_OK << 16;
24 qla4xxx_srb_compl(ha, srb);
25 } else {
26 DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
27 "%d\n", ha->host_no, index));
28 set_bit(DPC_RESET_HA, &ha->dpc_flags);
29 }
30}
31
32/**
33 * qla4xxx_status_entry - processes status IOCBs
34 * @ha: Pointer to host adapter structure.
35 * @sts_entry: Pointer to status entry structure.
36 **/
37static void qla4xxx_status_entry(struct scsi_qla_host *ha,
38 struct status_entry *sts_entry)
39{
40 uint8_t scsi_status;
41 struct scsi_cmnd *cmd;
42 struct srb *srb;
43 struct ddb_entry *ddb_entry;
44 uint32_t residual;
45 uint16_t sensebytecnt;
46
47 if (sts_entry->completionStatus == SCS_COMPLETE &&
48 sts_entry->scsiStatus == 0) {
49 qla4xxx_process_completed_request(ha,
50 le32_to_cpu(sts_entry->
51 handle));
52 return;
53 }
54
55 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
56 if (!srb) {
57 /* FIXMEdg: Don't we need to reset ISP in this case??? */
58 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
59 "handle 0x%x, sp=%p. This cmd may have already "
60 "been completed.\n", ha->host_no, __func__,
61 le32_to_cpu(sts_entry->handle), srb));
62 return;
63 }
64
65 cmd = srb->cmd;
66 if (cmd == NULL) {
67 DEBUG2(printk("scsi%ld: %s: Command already returned back to "
68 "OS pkt->handle=%d srb=%p srb->state:%d\n",
69 ha->host_no, __func__, sts_entry->handle,
70 srb, srb->state));
71 dev_warn(&ha->pdev->dev, "Command is NULL:"
72 " already returned to OS (srb=%p)\n", srb);
73 return;
74 }
75
76 ddb_entry = srb->ddb;
77 if (ddb_entry == NULL) {
78 cmd->result = DID_NO_CONNECT << 16;
79 goto status_entry_exit;
80 }
81
82 residual = le32_to_cpu(sts_entry->residualByteCnt);
83
84 /* Translate ISP error to a Linux SCSI error. */
85 scsi_status = sts_entry->scsiStatus;
86 switch (sts_entry->completionStatus) {
87 case SCS_COMPLETE:
88 if (scsi_status == 0) {
89 cmd->result = DID_OK << 16;
90 break;
91 }
92
93 if (sts_entry->iscsiFlags &
94 (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
95 cmd->resid = residual;
96
97 cmd->result = DID_OK << 16 | scsi_status;
98
99 if (scsi_status != SCSI_CHECK_CONDITION)
100 break;
101
102 /* Copy Sense Data into sense buffer. */
103 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
104
105 sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
106 if (sensebytecnt == 0)
107 break;
108
109 memcpy(cmd->sense_buffer, sts_entry->senseData,
110 min(sensebytecnt,
111 (uint16_t) sizeof(cmd->sense_buffer)));
112
113 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
114 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
115 cmd->device->channel, cmd->device->id,
116 cmd->device->lun, __func__,
117 sts_entry->senseData[2] & 0x0f,
118 sts_entry->senseData[12],
119 sts_entry->senseData[13]));
120
121 srb->flags |= SRB_GOT_SENSE;
122 break;
123
124 case SCS_INCOMPLETE:
125 /* Always set the status to DID_ERROR, since
126 * all conditions result in that status anyway */
127 cmd->result = DID_ERROR << 16;
128 break;
129
130 case SCS_RESET_OCCURRED:
131 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
132 ha->host_no, cmd->device->channel,
133 cmd->device->id, cmd->device->lun, __func__));
134
135 cmd->result = DID_RESET << 16;
136 break;
137
138 case SCS_ABORTED:
139 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
140 ha->host_no, cmd->device->channel,
141 cmd->device->id, cmd->device->lun, __func__));
142
143 cmd->result = DID_RESET << 16;
144 break;
145
146 case SCS_TIMEOUT:
147 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
148 ha->host_no, cmd->device->channel,
149 cmd->device->id, cmd->device->lun));
150
151 cmd->result = DID_BUS_BUSY << 16;
152
153 /*
154 * Mark device missing so that we won't continue to send
155 * I/O to this device. We should get a ddb state change
156 * AEN soon.
157 */
158 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
159 qla4xxx_mark_device_missing(ha, ddb_entry);
160 break;
161
162 case SCS_DATA_UNDERRUN:
163 case SCS_DATA_OVERRUN:
164 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
165 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
166 "residual = 0x%x\n", ha->host_no,
167 cmd->device->channel, cmd->device->id,
168 cmd->device->lun, __func__, residual));
169
170 cmd->result = DID_ERROR << 16;
171 break;
172 }
173
174 if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
175 /*
176 * Firmware detected a SCSI transport underrun
177 * condition
178 */
179 cmd->resid = residual;
180 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
181 "detected, xferlen = 0x%x, residual = "
182 "0x%x\n",
183 ha->host_no, cmd->device->channel,
184 cmd->device->id,
185 cmd->device->lun, __func__,
186 cmd->request_bufflen,
187 residual));
188 }
189
190 /*
191		 * If there is scsi_status, it takes precedence over
192 * underflow condition.
193 */
194 if (scsi_status != 0) {
195 cmd->result = DID_OK << 16 | scsi_status;
196
197 if (scsi_status != SCSI_CHECK_CONDITION)
198 break;
199
200 /* Copy Sense Data into sense buffer. */
201 memset(cmd->sense_buffer, 0,
202 sizeof(cmd->sense_buffer));
203
204 sensebytecnt =
205 le16_to_cpu(sts_entry->senseDataByteCnt);
206 if (sensebytecnt == 0)
207 break;
208
209 memcpy(cmd->sense_buffer, sts_entry->senseData,
210 min(sensebytecnt,
211 (uint16_t) sizeof(cmd->sense_buffer)));
212
213 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
214 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
215 cmd->device->channel, cmd->device->id,
216 cmd->device->lun, __func__,
217 sts_entry->senseData[2] & 0x0f,
218 sts_entry->senseData[12],
219 sts_entry->senseData[13]));
220 } else {
221 /*
222 * If RISC reports underrun and target does not
223 * report it then we must have a lost frame, so
224 * tell upper layer to retry it by reporting a
225 * bus busy.
226 */
227 if ((sts_entry->iscsiFlags &
228 ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
229 cmd->result = DID_BUS_BUSY << 16;
230 } else if ((cmd->request_bufflen - residual) <
231 cmd->underflow) {
232 /*
233 * Handle mid-layer underflow???
234 *
235 * For kernels less than 2.4, the driver must
236 * return an error if an underflow is detected.
237 * For kernels equal-to and above 2.4, the
238				 * mid-layer will apparently handle the
239 * underflow by detecting the residual count --
240 * unfortunately, we do not see where this is
241 * actually being done. In the interim, we
242 * will return DID_ERROR.
243 */
244 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
245 "Mid-layer Data underrun, "
246 "xferlen = 0x%x, "
247 "residual = 0x%x\n", ha->host_no,
248 cmd->device->channel,
249 cmd->device->id,
250 cmd->device->lun, __func__,
251 cmd->request_bufflen, residual));
252
253 cmd->result = DID_ERROR << 16;
254 } else {
255 cmd->result = DID_OK << 16;
256 }
257 }
258 break;
259
260 case SCS_DEVICE_LOGGED_OUT:
261 case SCS_DEVICE_UNAVAILABLE:
262 /*
263 * Mark device missing so that we won't continue to
264 * send I/O to this device. We should get a ddb
265 * state change AEN soon.
266 */
267 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
268 qla4xxx_mark_device_missing(ha, ddb_entry);
269
270 cmd->result = DID_BUS_BUSY << 16;
271 break;
272
273 case SCS_QUEUE_FULL:
274 /*
275 * SCSI Mid-Layer handles device queue full
276 */
277 cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
278 DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
279 "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
280 " iResp=%02x\n", ha->host_no, cmd->device->id,
281 cmd->device->lun, __func__,
282 sts_entry->completionStatus,
283 sts_entry->scsiStatus, sts_entry->state_flags,
284 sts_entry->iscsiFlags,
285 sts_entry->iscsiResponse));
286 break;
287
288 default:
289 cmd->result = DID_ERROR << 16;
290 break;
291 }
292
293status_entry_exit:
294
295 /* complete the request */
296 srb->cc_stat = sts_entry->completionStatus;
297 qla4xxx_srb_compl(ha, srb);
298}
299
300/**
301 * qla4xxx_process_response_queue - process response queue completions
302 * @ha: Pointer to host adapter structure.
303 *
304 * This routine processes response queue completions in interrupt context.
305 * Hardware_lock locked upon entry
306 **/
307static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
308{
309 uint32_t count = 0;
310 struct srb *srb = NULL;
311 struct status_entry *sts_entry;
312
313 /* Process all responses from response queue */
314 while ((ha->response_in =
315 (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
316 ha->response_out) {
317 sts_entry = (struct status_entry *) ha->response_ptr;
318 count++;
319
320 /* Advance pointers for next entry */
321 if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
322 ha->response_out = 0;
323 ha->response_ptr = ha->response_ring;
324 } else {
325 ha->response_out++;
326 ha->response_ptr++;
327 }
328
329 /* process entry */
330 switch (sts_entry->hdr.entryType) {
331 case ET_STATUS:
332 /*
333 * Common status - Single completion posted in single
334 * IOSB.
335 */
336 qla4xxx_status_entry(ha, sts_entry);
337 break;
338
339 case ET_PASSTHRU_STATUS:
340 break;
341
342 case ET_STATUS_CONTINUATION:
343 /* Just throw away the status continuation entries */
344 DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
345 "- ignoring\n", ha->host_no, __func__));
346 break;
347
348 case ET_COMMAND:
 349 /* ISP device queue is full. Command not
 350 * accepted by ISP. Return it to the
 351 * mid-layer for a later retry. */
352
353 srb = qla4xxx_del_from_active_array(ha,
354 le32_to_cpu(sts_entry->
355 handle));
356 if (srb == NULL)
357 goto exit_prq_invalid_handle;
358
359 DEBUG2(printk("scsi%ld: %s: FW device queue full, "
360 "srb %p\n", ha->host_no, __func__, srb));
361
 362 /* Retry normally by sending it back with
363 * DID_BUS_BUSY */
364 srb->cmd->result = DID_BUS_BUSY << 16;
365 qla4xxx_srb_compl(ha, srb);
366 break;
367
368 case ET_CONTINUE:
369 /* Just throw away the continuation entries */
370 DEBUG2(printk("scsi%ld: %s: Continuation entry - "
371 "ignoring\n", ha->host_no, __func__));
372 break;
373
374 default:
375 /*
376 * Invalid entry in response queue, reset RISC
377 * firmware.
378 */
379 DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
380 "response queue \n", ha->host_no,
381 __func__,
382 sts_entry->hdr.entryType));
383 goto exit_prq_error;
384 }
385 }
386
387 /*
 388 * Done with responses, update the ISP. For QLA4010, this also clears
389 * the interrupt.
390 */
391 writel(ha->response_out, &ha->reg->rsp_q_out);
392 readl(&ha->reg->rsp_q_out);
393
394 return;
395
396exit_prq_invalid_handle:
397 DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
398 ha->host_no, __func__, srb, sts_entry->hdr.entryType,
399 sts_entry->completionStatus));
400
401exit_prq_error:
402 writel(ha->response_out, &ha->reg->rsp_q_out);
403 readl(&ha->reg->rsp_q_out);
404
405 set_bit(DPC_RESET_HA, &ha->dpc_flags);
406}
407
408/**
409 * qla4xxx_isr_decode_mailbox - decodes mailbox status
410 * @ha: Pointer to host adapter structure.
 411 * @mbox_status: Mailbox status.
 412 *
 413 * This routine decodes the mailbox status during the ISR.
 414 * Hardware_lock locked upon entry. Runs in interrupt context.
415 **/
416static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
417 uint32_t mbox_status)
418{
419 int i;
420
421 if ((mbox_status == MBOX_STS_BUSY) ||
422 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
423 (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
424 ha->mbox_status[0] = mbox_status;
425
426 if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
427 /*
428 * Copy all mailbox registers to a temporary
429 * location and set mailbox command done flag
430 */
431 for (i = 1; i < ha->mbox_status_count; i++)
432 ha->mbox_status[i] =
433 readl(&ha->reg->mailbox[i]);
434
435 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
436 wake_up(&ha->mailbox_wait_queue);
437 }
438 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
439 /* Immediately process the AENs that don't require much work.
440 * Only queue the database_changed AENs */
441 switch (mbox_status) {
442 case MBOX_ASTS_SYSTEM_ERROR:
443 /* Log Mailbox registers */
444 if (ql4xdontresethba) {
445 DEBUG2(printk("%s:Dont Reset HBA\n",
446 __func__));
447 } else {
448 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
449 set_bit(DPC_RESET_HA, &ha->dpc_flags);
450 }
451 break;
452
453 case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
454 case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
455 case MBOX_ASTS_NVRAM_INVALID:
456 case MBOX_ASTS_IP_ADDRESS_CHANGED:
457 case MBOX_ASTS_DHCP_LEASE_EXPIRED:
458 DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
459 "Reset HA\n", ha->host_no, mbox_status));
460 set_bit(DPC_RESET_HA, &ha->dpc_flags);
461 break;
462
463 case MBOX_ASTS_LINK_UP:
464 DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
465 ha->host_no, mbox_status));
466 set_bit(AF_LINK_UP, &ha->flags);
467 break;
468
469 case MBOX_ASTS_LINK_DOWN:
470 DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
471 ha->host_no, mbox_status));
472 clear_bit(AF_LINK_UP, &ha->flags);
473 break;
474
475 case MBOX_ASTS_HEARTBEAT:
476 ha->seconds_since_last_heartbeat = 0;
477 break;
478
479 case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
480 DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
481 "ACQUIRED\n", ha->host_no, mbox_status));
482 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
483 break;
484
485 case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
486 case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
487 * mode
488 * only */
489 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
490 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
491 case MBOX_ASTS_SUBNET_STATE_CHANGE:
492 /* No action */
493 DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
494 mbox_status));
495 break;
496
497 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
498 case MBOX_ASTS_DNS:
499 /* No action */
500 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
501 "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
502 ha->host_no, mbox_status,
503 readl(&ha->reg->mailbox[1]),
504 readl(&ha->reg->mailbox[2])));
505 break;
506
507 case MBOX_ASTS_SELF_TEST_FAILED:
508 case MBOX_ASTS_LOGIN_FAILED:
509 /* No action */
510 DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
511 "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
512 ha->host_no, mbox_status,
513 readl(&ha->reg->mailbox[1]),
514 readl(&ha->reg->mailbox[2]),
515 readl(&ha->reg->mailbox[3])));
516 break;
517
518 case MBOX_ASTS_DATABASE_CHANGED:
519 /* Queue AEN information and process it in the DPC
520 * routine */
521 if (ha->aen_q_count > 0) {
522 /* advance pointer */
523 if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
524 ha->aen_in = 0;
525 else
526 ha->aen_in++;
527
528 /* decrement available counter */
529 ha->aen_q_count--;
530
531 for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
532 ha->aen_q[ha->aen_in].mbox_sts[i] =
533 readl(&ha->reg->mailbox[i]);
534
535 ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
536
537 /* print debug message */
538 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
539 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
540 ha->host_no, ha->aen_in,
541 mbox_status,
542 ha->aen_q[ha->aen_in].mbox_sts[1],
543 ha->aen_q[ha->aen_in].mbox_sts[2],
544 ha->aen_q[ha->aen_in].mbox_sts[3],
 545 ha->aen_q[ha->aen_in].mbox_sts[4]));
546
547 /* The DPC routine will process the aen */
548 set_bit(DPC_AEN, &ha->dpc_flags);
549 } else {
550 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
551 "overflowed! AEN LOST!!\n",
552 ha->host_no, __func__,
553 mbox_status));
554
555 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
556 ha->host_no));
557
558 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
559 DEBUG2(printk("AEN[%d] %04x %04x %04x "
560 "%04x\n", i,
561 ha->aen_q[i].mbox_sts[0],
562 ha->aen_q[i].mbox_sts[1],
563 ha->aen_q[i].mbox_sts[2],
564 ha->aen_q[i].mbox_sts[3]));
565 }
566 }
567 break;
568
569 default:
570 DEBUG2(printk(KERN_WARNING
571 "scsi%ld: AEN %04x UNKNOWN\n",
572 ha->host_no, mbox_status));
573 break;
574 }
575 } else {
576 DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
577 ha->host_no, mbox_status));
578
579 ha->mbox_status[0] = mbox_status;
580 }
581}
582
583/**
584 * qla4xxx_interrupt_service_routine - isr
585 * @ha: pointer to host adapter structure.
586 *
587 * This is the main interrupt service routine.
588 * hardware_lock locked upon entry. runs in interrupt context.
589 **/
590void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
591 uint32_t intr_status)
592{
593 /* Process response queue interrupt. */
594 if (intr_status & CSR_SCSI_COMPLETION_INTR)
595 qla4xxx_process_response_queue(ha);
596
597 /* Process mailbox/asynch event interrupt.*/
598 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
599 qla4xxx_isr_decode_mailbox(ha,
600 readl(&ha->reg->mailbox[0]));
601
602 /* Clear Mailbox Interrupt */
603 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
604 &ha->reg->ctrl_status);
605 readl(&ha->reg->ctrl_status);
606 }
607}
608
609/**
610 * qla4xxx_intr_handler - hardware interrupt handler.
611 * @irq: Unused
612 * @dev_id: Pointer to host adapter structure
613 * @regs: Unused
614 **/
615irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
616{
617 struct scsi_qla_host *ha;
618 uint32_t intr_status;
619 unsigned long flags = 0;
620 uint8_t reqs_count = 0;
621
622 ha = (struct scsi_qla_host *) dev_id;
623 if (!ha) {
624 DEBUG2(printk(KERN_INFO
625 "qla4xxx: Interrupt with NULL host ptr\n"));
626 return IRQ_NONE;
627 }
628
629 spin_lock_irqsave(&ha->hardware_lock, flags);
630
631 /*
632 * Repeatedly service interrupts up to a maximum of
633 * MAX_REQS_SERVICED_PER_INTR
634 */
635 while (1) {
636 /*
637 * Read interrupt status
638 */
639 if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
640 ha->response_out)
641 intr_status = CSR_SCSI_COMPLETION_INTR;
642 else
643 intr_status = readl(&ha->reg->ctrl_status);
644
645 if ((intr_status &
646 (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
647 0) {
648 if (reqs_count == 0)
649 ha->spurious_int_count++;
650 break;
651 }
652
653 if (intr_status & CSR_FATAL_ERROR) {
654 DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
655 "Status 0x%04x\n", ha->host_no,
 656 readl(isp_port_error_status(ha))));
657
658 /* Issue Soft Reset to clear this error condition.
659 * This will prevent the RISC from repeatedly
660 * interrupting the driver; thus, allowing the DPC to
661 * get scheduled to continue error recovery.
662 * NOTE: Disabling RISC interrupts does not work in
663 * this case, as CSR_FATAL_ERROR overrides
664 * CSR_SCSI_INTR_ENABLE */
665 if ((readl(&ha->reg->ctrl_status) &
666 CSR_SCSI_RESET_INTR) == 0) {
667 writel(set_rmask(CSR_SOFT_RESET),
668 &ha->reg->ctrl_status);
669 readl(&ha->reg->ctrl_status);
670 }
671
672 writel(set_rmask(CSR_FATAL_ERROR),
673 &ha->reg->ctrl_status);
674 readl(&ha->reg->ctrl_status);
675
676 __qla4xxx_disable_intrs(ha);
677
678 set_bit(DPC_RESET_HA, &ha->dpc_flags);
679
680 break;
681 } else if (intr_status & CSR_SCSI_RESET_INTR) {
682 clear_bit(AF_ONLINE, &ha->flags);
683 __qla4xxx_disable_intrs(ha);
684
685 writel(set_rmask(CSR_SCSI_RESET_INTR),
686 &ha->reg->ctrl_status);
687 readl(&ha->reg->ctrl_status);
688
689 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
690
691 break;
692 } else if (intr_status & INTR_PENDING) {
693 qla4xxx_interrupt_service_routine(ha, intr_status);
694 ha->total_io_count++;
695 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
696 break;
697
698 intr_status = 0;
699 }
700 }
701
702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
703
704 return IRQ_HANDLED;
705}
706
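The handler above still uses the 2.6.18-era three-argument prototype (irq, dev_id, pt_regs). As a rough, hypothetical sketch of how it would be wired up at probe time (the real registration lives in ql4_os.c and is not part of this hunk; the IRQF_SHARED spelling, SA_SHIRQ on older trees, is an assumption):

	int ret;

	/* Hypothetical hook-up; adjust flags to the target kernel. */
	ret = request_irq(ha->pdev->irq, qla4xxx_intr_handler,
			  IRQF_SHARED, DRIVER_NAME, ha);
	if (ret) {
		dev_warn(&ha->pdev->dev, "failed to register IRQ %d\n",
			 ha->pdev->irq);
		return ret;
	}

	/* ... and on teardown: free_irq(ha->pdev->irq, ha); */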
707/**
708 * qla4xxx_process_aen - processes AENs generated by firmware
709 * @ha: pointer to host adapter structure.
710 * @process_aen: type of AENs to process
711 *
712 * Processes specific types of Asynchronous Events generated by firmware.
713 * The type of AENs to process is specified by process_aen and can be
714 * PROCESS_ALL_AENS 0
715 * FLUSH_DDB_CHANGED_AENS 1
716 * RELOGIN_DDB_CHANGED_AENS 2
717 **/
718void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
719{
720 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
721 struct aen *aen;
722 int i;
723 unsigned long flags;
724
725 spin_lock_irqsave(&ha->hardware_lock, flags);
726 while (ha->aen_out != ha->aen_in) {
727 /* Advance pointers for next entry */
728 if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
729 ha->aen_out = 0;
730 else
731 ha->aen_out++;
732
733 ha->aen_q_count++;
734 aen = &ha->aen_q[ha->aen_out];
735
736 /* copy aen information to local structure */
737 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
738 mbox_sts[i] = aen->mbox_sts[i];
739
740 spin_unlock_irqrestore(&ha->hardware_lock, flags);
741
742 DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
743 "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
744 mbox_sts[0], mbox_sts[2], mbox_sts[3],
745 mbox_sts[1], mbox_sts[4]));
746
747 switch (mbox_sts[0]) {
748 case MBOX_ASTS_DATABASE_CHANGED:
749 if (process_aen == FLUSH_DDB_CHANGED_AENS) {
750 DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
751 "[%d] state=%04x FLUSHED!\n",
752 ha->host_no, ha->aen_out,
753 mbox_sts[0], mbox_sts[2],
754 mbox_sts[3]));
755 break;
756 } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
757 /* for use during init time, we only want to
758 * relogin non-active ddbs */
759 struct ddb_entry *ddb_entry;
760
761 ddb_entry =
762 /* FIXME: name length? */
763 qla4xxx_lookup_ddb_by_fw_index(ha,
764 mbox_sts[2]);
765 if (!ddb_entry)
766 break;
767
768 ddb_entry->dev_scan_wait_to_complete_relogin =
769 0;
770 ddb_entry->dev_scan_wait_to_start_relogin =
771 jiffies +
772 ((ddb_entry->default_time2wait +
773 4) * HZ);
774
775 DEBUG2(printk("scsi%ld: ddb index [%d] initate"
776 " RELOGIN after %d seconds\n",
777 ha->host_no,
778 ddb_entry->fw_ddb_index,
779 ddb_entry->default_time2wait +
780 4));
781 break;
782 }
783
784 if (mbox_sts[1] == 0) { /* Global DB change. */
785 qla4xxx_reinitialize_ddb_list(ha);
786 } else if (mbox_sts[1] == 1) { /* Specific device. */
787 qla4xxx_process_ddb_changed(ha, mbox_sts[2],
788 mbox_sts[3]);
789 }
790 break;
791 }
792 spin_lock_irqsave(&ha->hardware_lock, flags);
793 }
794 spin_unlock_irqrestore(&ha->hardware_lock, flags);
795
796}
797
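Note that qla4xxx_isr_decode_mailbox() only queues MBOX_ASTS_DATABASE_CHANGED AENs and sets DPC_AEN; the queue is drained later from process context. A minimal sketch of how the DPC side would typically consume it (the actual DPC routine is elsewhere in this patch, so the exact form here is an assumption):

	/* Inside the driver's DPC work routine (sketch). */
	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);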
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
new file mode 100644
index 00000000000..ed977f70b2d
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -0,0 +1,930 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10
11/**
12 * qla4xxx_mailbox_command - issues mailbox commands
13 * @ha: Pointer to host adapter structure.
14 * @inCount: number of mailbox registers to load.
15 * @outCount: number of mailbox registers to return.
16 * @mbx_cmd: data pointer for mailbox in registers.
17 * @mbx_sts: data pointer for mailbox out registers.
18 *
 19 * This routine issues mailbox commands and waits for completion.
20 * If outCount is 0, this routine completes successfully WITHOUT waiting
21 * for the mailbox command to complete.
22 **/
23int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
24 uint8_t outCount, uint32_t *mbx_cmd,
25 uint32_t *mbx_sts)
26{
27 int status = QLA_ERROR;
28 uint8_t i;
29 u_long wait_count;
30 uint32_t intr_status;
31 unsigned long flags = 0;
32 DECLARE_WAITQUEUE(wait, current);
33
34 mutex_lock(&ha->mbox_sem);
35
36 /* Mailbox code active */
37 set_bit(AF_MBOX_COMMAND, &ha->flags);
38
39 /* Make sure that pointers are valid */
40 if (!mbx_cmd || !mbx_sts) {
41 DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
42 "pointer\n", ha->host_no, __func__));
43 goto mbox_exit;
44 }
45
46 /* To prevent overwriting mailbox registers for a command that has
47 * not yet been serviced, check to see if a previously issued
48 * mailbox command is interrupting.
49 * -----------------------------------------------------------------
50 */
51 spin_lock_irqsave(&ha->hardware_lock, flags);
52 intr_status = readl(&ha->reg->ctrl_status);
53 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
54 /* Service existing interrupt */
55 qla4xxx_interrupt_service_routine(ha, intr_status);
56 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
57 }
58
59 /* Send the mailbox command to the firmware */
60 ha->mbox_status_count = outCount;
61 for (i = 0; i < outCount; i++)
62 ha->mbox_status[i] = 0;
63
64 /* Load all mailbox registers, except mailbox 0. */
65 for (i = 1; i < inCount; i++)
66 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
67
68 /* Wakeup firmware */
69 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
70 readl(&ha->reg->mailbox[0]);
71 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
72 readl(&ha->reg->ctrl_status);
73 spin_unlock_irqrestore(&ha->hardware_lock, flags);
74
75 /* Wait for completion */
76 set_current_state(TASK_UNINTERRUPTIBLE);
77 add_wait_queue(&ha->mailbox_wait_queue, &wait);
78
79 /*
80 * If we don't want status, don't wait for the mailbox command to
 81 * complete. For example, MBOX_CMD_RESET_FW doesn't return status;
 82 * the caller must poll the inbound Interrupt Mask for completion.
83 */
84 if (outCount == 0) {
85 status = QLA_SUCCESS;
86 set_current_state(TASK_RUNNING);
87 remove_wait_queue(&ha->mailbox_wait_queue, &wait);
88 goto mbox_exit;
89 }
90 /* Wait for command to complete */
91 wait_count = jiffies + MBOX_TOV * HZ;
92 while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
93 if (time_after_eq(jiffies, wait_count))
94 break;
95
96 spin_lock_irqsave(&ha->hardware_lock, flags);
97 intr_status = readl(&ha->reg->ctrl_status);
98 if (intr_status & INTR_PENDING) {
99 /*
100 * Service the interrupt.
101 * The ISR will save the mailbox status registers
102 * to a temporary storage location in the adapter
103 * structure.
104 */
105 ha->mbox_status_count = outCount;
106 qla4xxx_interrupt_service_routine(ha, intr_status);
107 }
108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109 msleep(10);
110 }
111 set_current_state(TASK_RUNNING);
112 remove_wait_queue(&ha->mailbox_wait_queue, &wait);
113
114 /* Check for mailbox timeout. */
115 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
116 DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
117 " Scheduling Adapter Reset\n", ha->host_no,
118 mbx_cmd[0]));
119 ha->mailbox_timeout_count++;
120 mbx_sts[0] = (-1);
121 set_bit(DPC_RESET_HA, &ha->dpc_flags);
122 goto mbox_exit;
123 }
124
125 /*
126 * Copy the mailbox out registers to the caller's mailbox in/out
127 * structure.
128 */
129 spin_lock_irqsave(&ha->hardware_lock, flags);
130 for (i = 0; i < outCount; i++)
131 mbx_sts[i] = ha->mbox_status[i];
132
133 /* Set return status and error flags (if applicable). */
134 switch (ha->mbox_status[0]) {
135 case MBOX_STS_COMMAND_COMPLETE:
136 status = QLA_SUCCESS;
137 break;
138
139 case MBOX_STS_INTERMEDIATE_COMPLETION:
140 status = QLA_SUCCESS;
141 break;
142
143 case MBOX_STS_BUSY:
144 DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
145 ha->host_no, __func__, mbx_cmd[0]));
146 ha->mailbox_timeout_count++;
147 break;
148
149 default:
150 DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
151 "sts = %08X ****\n", ha->host_no, __func__,
152 mbx_cmd[0], mbx_sts[0]));
153 break;
154 }
155 spin_unlock_irqrestore(&ha->hardware_lock, flags);
156
157mbox_exit:
158 clear_bit(AF_MBOX_COMMAND, &ha->flags);
159 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
160 mutex_unlock(&ha->mbox_sem);
161
162 return status;
163}
164
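For reference, a minimal caller-side sketch of the inCount/outCount convention described above, modeled on the wrapper routines later in this file (mailbox 0 carries the command, the remaining inCount-1 registers carry arguments, and outCount registers are copied back into mbx_sts, with mbx_sts[0] holding the completion status):

	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;	/* command in mailbox 0 */

	/* Load 1 register, expect 4 back. */
	if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS)
		return QLA_ERROR;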
165
166/**
167 * qla4xxx_issue_iocb - issue mailbox iocb command
168 * @ha: adapter state pointer.
169 * @buffer: buffer pointer.
170 * @phys_addr: physical address of buffer.
171 * @size: size of buffer.
172 *
173 * Issues iocbs via mailbox commands.
174 * TARGET_QUEUE_LOCK must be released.
175 * ADAPTER_STATE_LOCK must be released.
176 **/
177int
178qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
179 dma_addr_t phys_addr, size_t size)
180{
181 uint32_t mbox_cmd[MBOX_REG_COUNT];
182 uint32_t mbox_sts[MBOX_REG_COUNT];
183 int status;
184
185 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
186 memset(&mbox_sts, 0, sizeof(mbox_sts));
187 mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
188 mbox_cmd[1] = 0;
189 mbox_cmd[2] = LSDW(phys_addr);
190 mbox_cmd[3] = MSDW(phys_addr);
191 status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
192 return status;
193}
194
195int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
196 uint16_t fw_ddb_index,
197 uint16_t connection_id,
198 uint16_t option)
199{
200 uint32_t mbox_cmd[MBOX_REG_COUNT];
201 uint32_t mbox_sts[MBOX_REG_COUNT];
202
203 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
204 memset(&mbox_sts, 0, sizeof(mbox_sts));
205 mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
206 mbox_cmd[1] = fw_ddb_index;
207 mbox_cmd[2] = connection_id;
208 mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
209 if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
210 QLA_SUCCESS) {
211 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
212 "option %04x failed sts %04X %04X",
213 ha->host_no, __func__,
214 option, mbox_sts[0], mbox_sts[1]));
215 if (mbox_sts[0] == 0x4005)
216 DEBUG2(printk("%s reason %04X\n", __func__,
217 mbox_sts[1]));
218 }
219 return QLA_SUCCESS;
220}
221
222int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
223 uint16_t fw_ddb_index)
224{
225 uint32_t mbox_cmd[MBOX_REG_COUNT];
226 uint32_t mbox_sts[MBOX_REG_COUNT];
227
228 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
229 memset(&mbox_sts, 0, sizeof(mbox_sts));
230 mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
231 mbox_cmd[1] = fw_ddb_index;
232 if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
233 QLA_SUCCESS)
234 return QLA_ERROR;
235
236 return QLA_SUCCESS;
237}
238
239/**
240 * qla4xxx_initialize_fw_cb - initializes firmware control block.
241 * @ha: Pointer to host adapter structure.
242 **/
243int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
244{
245 struct init_fw_ctrl_blk *init_fw_cb;
246 dma_addr_t init_fw_cb_dma;
247 uint32_t mbox_cmd[MBOX_REG_COUNT];
248 uint32_t mbox_sts[MBOX_REG_COUNT];
249 int status = QLA_ERROR;
250
251 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
252 sizeof(struct init_fw_ctrl_blk),
253 &init_fw_cb_dma, GFP_KERNEL);
254 if (init_fw_cb == NULL) {
255 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
256 ha->host_no, __func__));
257 return 10;
258 }
259 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
260
261 /* Get Initialize Firmware Control Block. */
262 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
263 memset(&mbox_sts, 0, sizeof(mbox_sts));
264 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
265 mbox_cmd[2] = LSDW(init_fw_cb_dma);
266 mbox_cmd[3] = MSDW(init_fw_cb_dma);
267 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
268 QLA_SUCCESS) {
269 dma_free_coherent(&ha->pdev->dev,
270 sizeof(struct init_fw_ctrl_blk),
271 init_fw_cb, init_fw_cb_dma);
272 return status;
273 }
274
275 /* Initialize request and response queues. */
276 qla4xxx_init_rings(ha);
277
278 /* Fill in the request and response queue information. */
279 init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out);
280 init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in);
281 init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
282 init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
283 init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma));
284 init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma));
285 init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma));
286 init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma));
287 init_fw_cb->ShadowRegBufAddrLo =
288 cpu_to_le32(LSDW(ha->shadow_regs_dma));
289 init_fw_cb->ShadowRegBufAddrHi =
290 cpu_to_le32(MSDW(ha->shadow_regs_dma));
291
292 /* Set up required options. */
293 init_fw_cb->FwOptions |=
294 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
295 FWOPT_INITIATOR_MODE);
296 init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
297
298 /* Save some info in adapter structure. */
299 ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions);
300 ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions);
301 ha->heartbeat_interval = init_fw_cb->HeartbeatInterval;
302 memcpy(ha->ip_address, init_fw_cb->IPAddr,
303 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
304 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
305 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
306 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
307 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
308 memcpy(ha->name_string, init_fw_cb->iSCSINameString,
309 min(sizeof(ha->name_string),
310 sizeof(init_fw_cb->iSCSINameString)));
311 memcpy(ha->alias, init_fw_cb->Alias,
312 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));
313
 314 /* Save Command Line Parameter info */
315 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout);
316 ha->discovery_wait = ql4xdiscoverywait;
317
318 /* Send Initialize Firmware Control Block. */
319 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
320 mbox_cmd[1] = 0;
321 mbox_cmd[2] = LSDW(init_fw_cb_dma);
322 mbox_cmd[3] = MSDW(init_fw_cb_dma);
323 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) ==
324 QLA_SUCCESS)
325 status = QLA_SUCCESS;
326 else {
327 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
328 "failed w/ status %04X\n", ha->host_no, __func__,
329 mbox_sts[0]));
330 }
331 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
332 init_fw_cb, init_fw_cb_dma);
333
334 return status;
335}
336
337/**
338 * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
339 * @ha: Pointer to host adapter structure.
340 **/
341int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
342{
343 struct init_fw_ctrl_blk *init_fw_cb;
344 dma_addr_t init_fw_cb_dma;
345 uint32_t mbox_cmd[MBOX_REG_COUNT];
346 uint32_t mbox_sts[MBOX_REG_COUNT];
347
348 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
349 sizeof(struct init_fw_ctrl_blk),
350 &init_fw_cb_dma, GFP_KERNEL);
351 if (init_fw_cb == NULL) {
352 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
353 __func__);
354 return 10;
355 }
356
357 /* Get Initialize Firmware Control Block. */
358 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
359 memset(&mbox_sts, 0, sizeof(mbox_sts));
360 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
361 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
362 mbox_cmd[2] = LSDW(init_fw_cb_dma);
363 mbox_cmd[3] = MSDW(init_fw_cb_dma);
364
365 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
366 QLA_SUCCESS) {
367 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
368 ha->host_no, __func__));
369 dma_free_coherent(&ha->pdev->dev,
370 sizeof(struct init_fw_ctrl_blk),
371 init_fw_cb, init_fw_cb_dma);
372 return QLA_ERROR;
373 }
374
375 /* Save IP Address. */
376 memcpy(ha->ip_address, init_fw_cb->IPAddr,
377 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
378 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
379 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
380 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
381 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
382
383 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
384 init_fw_cb, init_fw_cb_dma);
385
386 return QLA_SUCCESS;
387}
388
389/**
390 * qla4xxx_get_firmware_state - gets firmware state of HBA
391 * @ha: Pointer to host adapter structure.
392 **/
393int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
394{
395 uint32_t mbox_cmd[MBOX_REG_COUNT];
396 uint32_t mbox_sts[MBOX_REG_COUNT];
397
398 /* Get firmware version */
399 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
400 memset(&mbox_sts, 0, sizeof(mbox_sts));
401 mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
402 if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) !=
403 QLA_SUCCESS) {
404 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
405 "status %04X\n", ha->host_no, __func__,
406 mbox_sts[0]));
407 return QLA_ERROR;
408 }
409 ha->firmware_state = mbox_sts[1];
410 ha->board_id = mbox_sts[2];
411 ha->addl_fw_state = mbox_sts[3];
412 DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
413 ha->host_no, __func__, ha->firmware_state);)
414
415 return QLA_SUCCESS;
416}
417
418/**
419 * qla4xxx_get_firmware_status - retrieves firmware status
420 * @ha: Pointer to host adapter structure.
421 **/
422int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
423{
424 uint32_t mbox_cmd[MBOX_REG_COUNT];
425 uint32_t mbox_sts[MBOX_REG_COUNT];
426
427 /* Get firmware version */
428 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
429 memset(&mbox_sts, 0, sizeof(mbox_sts));
430 mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
431 if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) !=
432 QLA_SUCCESS) {
433 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
434 "status %04X\n", ha->host_no, __func__,
435 mbox_sts[0]));
436 return QLA_ERROR;
437 }
438
439 /* High-water mark of IOCBs */
440 ha->iocb_hiwat = mbox_sts[2];
441 if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
442 ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
443 else
444 dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d "
445 "firmare IOCBs available (%d).\n",
446 IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
447
448 return QLA_SUCCESS;
449}
450
451/**
452 * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
453 * @ha: Pointer to host adapter structure.
454 * @fw_ddb_index: Firmware's device database index
455 * @fw_ddb_entry: Pointer to firmware's device database entry structure
456 * @num_valid_ddb_entries: Pointer to number of valid ddb entries
457 * @next_ddb_index: Pointer to next valid device database index
458 * @fw_ddb_device_state: Pointer to device state
459 **/
460int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
461 uint16_t fw_ddb_index,
462 struct dev_db_entry *fw_ddb_entry,
463 dma_addr_t fw_ddb_entry_dma,
464 uint32_t *num_valid_ddb_entries,
465 uint32_t *next_ddb_index,
466 uint32_t *fw_ddb_device_state,
467 uint32_t *conn_err_detail,
468 uint16_t *tcp_source_port_num,
469 uint16_t *connection_id)
470{
471 int status = QLA_ERROR;
472 uint32_t mbox_cmd[MBOX_REG_COUNT];
473 uint32_t mbox_sts[MBOX_REG_COUNT];
474
475 /* Make sure the device index is valid */
476 if (fw_ddb_index >= MAX_DDB_ENTRIES) {
477 DEBUG2(printk("scsi%ld: %s: index [%d] out of range.\n",
478 ha->host_no, __func__, fw_ddb_index));
479 goto exit_get_fwddb;
480 }
481 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
482 memset(&mbox_sts, 0, sizeof(mbox_sts));
483 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
484 mbox_cmd[1] = (uint32_t) fw_ddb_index;
485 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
486 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
487 if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) ==
488 QLA_ERROR) {
489 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
490 " with status 0x%04X\n", ha->host_no, __func__,
491 mbox_sts[0]));
492 goto exit_get_fwddb;
493 }
494 if (fw_ddb_index != mbox_sts[1]) {
495 DEBUG2(printk("scsi%ld: %s: index mismatch [%d] != [%d].\n",
496 ha->host_no, __func__, fw_ddb_index,
497 mbox_sts[1]));
498 goto exit_get_fwddb;
499 }
500 if (fw_ddb_entry) {
501 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
502 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
503 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
504 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0],
505 fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2],
506 fw_ddb_entry->ipAddr[3],
507 le16_to_cpu(fw_ddb_entry->portNumber),
508 fw_ddb_entry->iscsiName);
509 }
510 if (num_valid_ddb_entries)
511 *num_valid_ddb_entries = mbox_sts[2];
512 if (next_ddb_index)
513 *next_ddb_index = mbox_sts[3];
514 if (fw_ddb_device_state)
515 *fw_ddb_device_state = mbox_sts[4];
516
 517 /*
 518 * RA: This mailbox has been changed to pass connection error
 519 * details. This is true for ISP4010 as per spec Version E; it is
 520 * not clear when it was changed. Get the time2wait from the
 521 * default_time2wait field of the DEV_DB_ENTRY struct (which we
 522 * refer to as minTime2Wait).
 523 */
524 if (conn_err_detail)
525 *conn_err_detail = mbox_sts[5];
526 if (tcp_source_port_num)
 527 *tcp_source_port_num = (uint16_t)(mbox_sts[6] >> 16);
528 if (connection_id)
529 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
530 status = QLA_SUCCESS;
531
532exit_get_fwddb:
533 return status;
534}
535
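A hedged sketch of the expected calling pattern for the routine above: the caller owns a DMA-coherent dev_db_entry buffer and passes NULL for any output value it does not need (the buffer handling mirrors qla4xxx_send_tgts() later in this file):

	struct dev_db_entry *fw_ddb_entry;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t state;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
					  sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry)
		return QLA_ERROR;

	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
				    fw_ddb_entry_dma, NULL, NULL, &state,
				    NULL, NULL, NULL) == QLA_SUCCESS)
		/* state now holds the firmware ddb device state */;

	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);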
536/**
 537 * qla4xxx_set_ddb_entry - sets a ddb entry.
 538 * @ha: Pointer to host adapter structure.
 539 * @fw_ddb_index: Firmware's device database index
 540 * @fw_ddb_entry_dma: DMA address of firmware's ddb entry structure, or 0.
541 *
542 * This routine initializes or updates the adapter's device database
543 * entry for the specified device. It also triggers a login for the
544 * specified device. Therefore, it may also be used as a secondary
545 * login routine when a NULL pointer is specified for the fw_ddb_entry.
546 **/
547int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
548 dma_addr_t fw_ddb_entry_dma)
549{
550 uint32_t mbox_cmd[MBOX_REG_COUNT];
551 uint32_t mbox_sts[MBOX_REG_COUNT];
552
553 /* Do not wait for completion. The firmware will send us an
554 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
555 */
556 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
557 memset(&mbox_sts, 0, sizeof(mbox_sts));
558
559 mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
560 mbox_cmd[1] = (uint32_t) fw_ddb_index;
561 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
562 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
563 return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
564}
565
566int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
567 uint16_t fw_ddb_index)
568{
569 int status = QLA_ERROR;
570 uint32_t mbox_cmd[MBOX_REG_COUNT];
571 uint32_t mbox_sts[MBOX_REG_COUNT];
572
573 /* Do not wait for completion. The firmware will send us an
574 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
575 */
576 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
577 memset(&mbox_sts, 0, sizeof(mbox_sts));
578 mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
579 mbox_cmd[1] = (uint32_t) fw_ddb_index;
580 mbox_cmd[2] = 0;
581 mbox_cmd[3] = 0;
582 mbox_cmd[4] = 0;
583 status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
584 DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
585 __func__, fw_ddb_index, status, mbox_sts[0],
586 mbox_sts[1]);)
587
588 return status;
589}
590
591/**
592 * qla4xxx_get_crash_record - retrieves crash record.
593 * @ha: Pointer to host adapter structure.
594 *
595 * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
596 **/
597void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
598{
599 uint32_t mbox_cmd[MBOX_REG_COUNT];
600 uint32_t mbox_sts[MBOX_REG_COUNT];
601 struct crash_record *crash_record = NULL;
602 dma_addr_t crash_record_dma = 0;
603 uint32_t crash_record_size = 0;
604 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 605 memset(&mbox_sts, 0, sizeof(mbox_sts));
606
607 /* Get size of crash record. */
608 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
609 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
610 QLA_SUCCESS) {
611 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
612 ha->host_no, __func__));
613 goto exit_get_crash_record;
614 }
615 crash_record_size = mbox_sts[4];
616 if (crash_record_size == 0) {
617 DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
618 ha->host_no, __func__));
619 goto exit_get_crash_record;
620 }
621
622 /* Alloc Memory for Crash Record. */
623 crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
624 &crash_record_dma, GFP_KERNEL);
625 if (crash_record == NULL)
626 goto exit_get_crash_record;
627
628 /* Get Crash Record. */
629 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
630 mbox_cmd[2] = LSDW(crash_record_dma);
631 mbox_cmd[3] = MSDW(crash_record_dma);
632 mbox_cmd[4] = crash_record_size;
633 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
634 QLA_SUCCESS)
635 goto exit_get_crash_record;
636
637 /* Dump Crash Record. */
638
639exit_get_crash_record:
640 if (crash_record)
641 dma_free_coherent(&ha->pdev->dev, crash_record_size,
642 crash_record, crash_record_dma);
643}
644
645/**
646 * qla4xxx_get_conn_event_log - retrieves connection event log
647 * @ha: Pointer to host adapter structure.
648 **/
649void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
650{
651 uint32_t mbox_cmd[MBOX_REG_COUNT];
652 uint32_t mbox_sts[MBOX_REG_COUNT];
653 struct conn_event_log_entry *event_log = NULL;
654 dma_addr_t event_log_dma = 0;
655 uint32_t event_log_size = 0;
656 uint32_t num_valid_entries;
657 uint32_t oldest_entry = 0;
658 uint32_t max_event_log_entries;
659 uint8_t i;
660
661
662 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 663 memset(&mbox_sts, 0, sizeof(mbox_sts));
664
 665 /* Get size of connection event log. */
666 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
667 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
668 QLA_SUCCESS)
669 goto exit_get_event_log;
670
671 event_log_size = mbox_sts[4];
672 if (event_log_size == 0)
673 goto exit_get_event_log;
674
 675 /* Alloc Memory for Connection Event Log. */
676 event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
677 &event_log_dma, GFP_KERNEL);
678 if (event_log == NULL)
679 goto exit_get_event_log;
680
 681 /* Get Connection Event Log. */
682 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
683 mbox_cmd[2] = LSDW(event_log_dma);
684 mbox_cmd[3] = MSDW(event_log_dma);
685 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
686 QLA_SUCCESS) {
687 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
688 "log!\n", ha->host_no, __func__));
689 goto exit_get_event_log;
690 }
691
692 /* Dump Event Log. */
693 num_valid_entries = mbox_sts[1];
694
695 max_event_log_entries = event_log_size /
696 sizeof(struct conn_event_log_entry);
697
698 if (num_valid_entries > max_event_log_entries)
699 oldest_entry = num_valid_entries % max_event_log_entries;
700
701 DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
702 ha->host_no, num_valid_entries));
703
704 if (extended_error_logging == 3) {
705 if (oldest_entry == 0) {
706 /* Circular Buffer has not wrapped around */
707 for (i=0; i < num_valid_entries; i++) {
708 qla4xxx_dump_buffer((uint8_t *)event_log+
709 (i*sizeof(*event_log)),
710 sizeof(*event_log));
711 }
712 }
713 else {
714 /* Circular Buffer has wrapped around -
715 * display accordingly*/
716 for (i=oldest_entry; i < max_event_log_entries; i++) {
717 qla4xxx_dump_buffer((uint8_t *)event_log+
718 (i*sizeof(*event_log)),
719 sizeof(*event_log));
720 }
721 for (i=0; i < oldest_entry; i++) {
722 qla4xxx_dump_buffer((uint8_t *)event_log+
723 (i*sizeof(*event_log)),
724 sizeof(*event_log));
725 }
726 }
727 }
728
729exit_get_event_log:
730 if (event_log)
731 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
732 event_log_dma);
733}
734
735/**
736 * qla4xxx_reset_lun - issues LUN Reset
737 * @ha: Pointer to host adapter structure.
 738 * @ddb_entry: Pointer to device database entry
 739 * @lun: LUN number
 740 *
 741 * This routine performs a LUN RESET on the specified target/lun.
 742 * The caller must ensure that the ddb_entry pointer is valid before
 743 * calling this routine.
744 **/
745int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
746 int lun)
747{
748 uint32_t mbox_cmd[MBOX_REG_COUNT];
749 uint32_t mbox_sts[MBOX_REG_COUNT];
750 int status = QLA_SUCCESS;
751
752 DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
753 ddb_entry->os_target_id, lun));
754
755 /*
756 * Send lun reset command to ISP, so that the ISP will return all
757 * outstanding requests with RESET status
758 */
759 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
760 memset(&mbox_sts, 0, sizeof(mbox_sts));
761 mbox_cmd[0] = MBOX_CMD_LUN_RESET;
762 mbox_cmd[1] = ddb_entry->fw_ddb_index;
763 mbox_cmd[2] = lun << 8;
764 mbox_cmd[5] = 0x01; /* Immediate Command Enable */
765 qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]);
766 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
767 mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
768 status = QLA_ERROR;
769
770 return status;
771}
772
773
774int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
775 uint32_t offset, uint32_t len)
776{
777 uint32_t mbox_cmd[MBOX_REG_COUNT];
778 uint32_t mbox_sts[MBOX_REG_COUNT];
779
780 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
781 memset(&mbox_sts, 0, sizeof(mbox_sts));
782 mbox_cmd[0] = MBOX_CMD_READ_FLASH;
783 mbox_cmd[1] = LSDW(dma_addr);
784 mbox_cmd[2] = MSDW(dma_addr);
785 mbox_cmd[3] = offset;
786 mbox_cmd[4] = len;
787 if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) !=
788 QLA_SUCCESS) {
789 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
790 "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
791 __func__, mbox_sts[0], mbox_sts[1], offset, len));
792 return QLA_ERROR;
793 }
794 return QLA_SUCCESS;
795}
796
797/**
798 * qla4xxx_get_fw_version - gets firmware version
799 * @ha: Pointer to host adapter structure.
800 *
 801 * Retrieves the firmware version of the HBA. In QLA4010, mailboxes 2 & 3 may
802 * hold an address for data. Make sure that we write 0 to those mailboxes,
803 * if unused.
804 **/
805int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
806{
807 uint32_t mbox_cmd[MBOX_REG_COUNT];
808 uint32_t mbox_sts[MBOX_REG_COUNT];
809
810 /* Get firmware version. */
811 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
812 memset(&mbox_sts, 0, sizeof(mbox_sts));
813 mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
814 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
815 QLA_SUCCESS) {
816 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
817 "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
818 return QLA_ERROR;
819 }
820
821 /* Save firmware version information. */
822 ha->firmware_version[0] = mbox_sts[1];
823 ha->firmware_version[1] = mbox_sts[2];
824 ha->patch_number = mbox_sts[3];
825 ha->build_number = mbox_sts[4];
826
827 return QLA_SUCCESS;
828}
829
830int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
831{
832 uint32_t mbox_cmd[MBOX_REG_COUNT];
833 uint32_t mbox_sts[MBOX_REG_COUNT];
834
835 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
836 memset(&mbox_sts, 0, sizeof(mbox_sts));
837
838 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
839 mbox_cmd[2] = LSDW(dma_addr);
840 mbox_cmd[3] = MSDW(dma_addr);
841
842 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
843 QLA_SUCCESS) {
844 DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
845 ha->host_no, __func__, mbox_sts[0]));
846 return QLA_ERROR;
847 }
848 return QLA_SUCCESS;
849}
850
851int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
852{
853 uint32_t mbox_cmd[MBOX_REG_COUNT];
854 uint32_t mbox_sts[MBOX_REG_COUNT];
855
856 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
857 memset(&mbox_sts, 0, sizeof(mbox_sts));
858
859 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
860 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
861
862 if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) !=
863 QLA_SUCCESS) {
864 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
865 *ddb_index = mbox_sts[2];
866 } else {
867 DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
868 ha->host_no, __func__, mbox_sts[0]));
869 return QLA_ERROR;
870 }
871 } else {
872 *ddb_index = MAX_PRST_DEV_DB_ENTRIES;
873 }
874
875 return QLA_SUCCESS;
876}
877
878
879int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
880{
881 struct dev_db_entry *fw_ddb_entry;
882 dma_addr_t fw_ddb_entry_dma;
883 uint32_t ddb_index;
884 int ret_val = QLA_SUCCESS;
885
886
887 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
888 sizeof(*fw_ddb_entry),
889 &fw_ddb_entry_dma, GFP_KERNEL);
890 if (!fw_ddb_entry) {
891 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
892 ha->host_no, __func__));
893 ret_val = QLA_ERROR;
894 goto qla4xxx_send_tgts_exit;
895 }
896
897 ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma);
898 if (ret_val != QLA_SUCCESS)
899 goto qla4xxx_send_tgts_exit;
900
901 ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index);
902 if (ret_val != QLA_SUCCESS)
903 goto qla4xxx_send_tgts_exit;
904
905 memset((void *)fw_ddb_entry->iSCSIAlias, 0,
906 sizeof(fw_ddb_entry->iSCSIAlias));
907
908 memset((void *)fw_ddb_entry->iscsiName, 0,
909 sizeof(fw_ddb_entry->iscsiName));
910
911 memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr));
912 memset((void *)fw_ddb_entry->targetAddr, 0,
913 sizeof(fw_ddb_entry->targetAddr));
914
915 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
916 fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port));
917
918 fw_ddb_entry->ipAddr[0] = *ip;
919 fw_ddb_entry->ipAddr[1] = *(ip + 1);
920 fw_ddb_entry->ipAddr[2] = *(ip + 2);
921 fw_ddb_entry->ipAddr[3] = *(ip + 3);
922
923 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
924
925qla4xxx_send_tgts_exit:
926 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
927 fw_ddb_entry, fw_ddb_entry_dma);
928 return ret_val;
929}
930
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
new file mode 100644
index 00000000000..e3957ca5b64
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -0,0 +1,224 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10static inline int eeprom_size(struct scsi_qla_host *ha)
11{
12 return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
13}
14
15static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
16{
17 return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
18 FM93C56A_NO_ADDR_BITS_16;
19}
20
21static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
22{
23 return FM93C56A_DATA_BITS_16;
24}
25
26static int fm93c56a_select(struct scsi_qla_host * ha)
27{
28 DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
29
30 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
31 writel(ha->eeprom_cmd_data, isp_nvram(ha));
32 readl(isp_nvram(ha));
33 return 1;
34}
35
36static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
37{
38 int i;
39 int mask;
40 int dataBit;
41 int previousBit;
42
43 /* Clock in a zero, then do the start bit. */
44 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
45 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
46 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
47 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
48 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
49 readl(isp_nvram(ha));
50 mask = 1 << (FM93C56A_CMD_BITS - 1);
51
52 /* Force the previous data bit to be different. */
53 previousBit = 0xffff;
54 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
55 dataBit =
56 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
57 if (previousBit != dataBit) {
58
59 /*
60 * If the bit changed, then change the DO state to
61 * match.
62 */
63 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
64 previousBit = dataBit;
65 }
66 writel(ha->eeprom_cmd_data | dataBit |
67 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
68 writel(ha->eeprom_cmd_data | dataBit |
69 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
70 readl(isp_nvram(ha));
71 cmd = cmd << 1;
72 }
73 mask = 1 << (eeprom_no_addr_bits(ha) - 1);
74
75 /* Force the previous data bit to be different. */
76 previousBit = 0xffff;
77 for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
78 dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
79 AUBURN_EEPROM_DO_0;
80 if (previousBit != dataBit) {
81 /*
82 * If the bit changed, then change the DO state to
83 * match.
84 */
85 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
86 previousBit = dataBit;
87 }
88 writel(ha->eeprom_cmd_data | dataBit |
89 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
90 writel(ha->eeprom_cmd_data | dataBit |
91 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
92 readl(isp_nvram(ha));
93 addr = addr << 1;
94 }
95 return 1;
96}
97
98static int fm93c56a_deselect(struct scsi_qla_host * ha)
99{
100 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
101 writel(ha->eeprom_cmd_data, isp_nvram(ha));
102 readl(isp_nvram(ha));
103 return 1;
104}
105
106static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
107{
108 int i;
109 int data = 0;
110 int dataBit;
111
112 /* Read the data bits
113 * The first bit is a dummy. Clock right over it. */
114 for (i = 0; i < eeprom_no_data_bits(ha); i++) {
115 writel(ha->eeprom_cmd_data |
116 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
117 writel(ha->eeprom_cmd_data |
118 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
119 dataBit =
120 (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
121 data = (data << 1) | dataBit;
122 }
123
124 *value = data;
125 return 1;
126}
127
128static int eeprom_readword(int eepromAddr, u16 * value,
129 struct scsi_qla_host * ha)
130{
131 fm93c56a_select(ha);
132 fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
133 fm93c56a_datain(ha, value);
134 fm93c56a_deselect(ha);
135 return 1;
136}
137
138/* Hardware_lock must be held before calling */
139u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
140{
141 u16 val;
142
143 /* NOTE: NVRAM uses half-word addresses */
144 eeprom_readword(offset, &val, ha);
145 return val;
146}
147
148int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
149{
150 int status = QLA_ERROR;
151 uint16_t checksum = 0;
152 uint32_t index;
153 unsigned long flags;
154
155 spin_lock_irqsave(&ha->hardware_lock, flags);
156 for (index = 0; index < eeprom_size(ha); index++)
157 checksum += rd_nvram_word(ha, index);
158 spin_unlock_irqrestore(&ha->hardware_lock, flags);
159
160 if (checksum == 0)
161 status = QLA_SUCCESS;
162
163 return status;
164}
165
166/*************************************************************************
167 *
168 * Hardware Semaphore routines
169 *
170 *************************************************************************/
171int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
172{
173 uint32_t value;
174 unsigned long flags;
175 unsigned int seconds = 30;
176
177 DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
178 "0x%x\n", ha->host_no, sem_mask, sem_bits));
179 do {
180 spin_lock_irqsave(&ha->hardware_lock, flags);
181 writel((sem_mask | sem_bits), isp_semaphore(ha));
182 value = readw(isp_semaphore(ha));
183 spin_unlock_irqrestore(&ha->hardware_lock, flags);
184 if ((value & (sem_mask >> 16)) == sem_bits) {
185 DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
186 "code = 0x%x\n", ha->host_no,
187 sem_mask, sem_bits));
188 return QLA_SUCCESS;
189 }
190 ssleep(1);
191 } while (--seconds);
192 return QLA_ERROR;
193}
194
195void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
196{
197 unsigned long flags;
198
199 spin_lock_irqsave(&ha->hardware_lock, flags);
200 writel(sem_mask, isp_semaphore(ha));
201 readl(isp_semaphore(ha));
202 spin_unlock_irqrestore(&ha->hardware_lock, flags);
203
204 DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
205 sem_mask));
206}
207
208int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
209{
210 uint32_t value;
211 unsigned long flags;
212
213 spin_lock_irqsave(&ha->hardware_lock, flags);
214 writel((sem_mask | sem_bits), isp_semaphore(ha));
215 value = readw(isp_semaphore(ha));
216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
217 if ((value & (sem_mask >> 16)) == sem_bits) {
218 DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
219 "0x%x, sema code=0x%x\n", ha->host_no,
220 sem_mask, sem_bits, value));
221 return 1;
222 }
223 return 0;
224}
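Taken together with rd_nvram_word() above, the intended pattern is: take the NVRAM hardware semaphore, read words under hardware_lock, then release the semaphore. A sketch under that assumption (SEM_NVRAM_MASK and SEM_NVRAM_BITS are placeholder names, not constants defined by this patch):

	u16 word;
	unsigned long flags;

	if (ql4xxx_sem_spinlock(ha, SEM_NVRAM_MASK, SEM_NVRAM_BITS) !=
	    QLA_SUCCESS)
		return QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	word = rd_nvram_word(ha, offset);	/* half-word address */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql4xxx_sem_unlock(ha, SEM_NVRAM_MASK);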
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
new file mode 100644
index 00000000000..08e2aed8c6c
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -0,0 +1,256 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef _QL4XNVRM_H_
9#define _QL4XNVRM_H_
10
11/*
 12 * Serial EEPROM (FM93C56A/66A/86A) definitions
13 */
14#define FM93C56A_SIZE_8 0x100
15#define FM93C56A_SIZE_16 0x80
16#define FM93C66A_SIZE_8 0x200
17#define FM93C66A_SIZE_16 0x100 /* 4010 */
18#define FM93C86A_SIZE_16 0x400 /* 4022 */
19
20#define FM93C56A_START 0x1
21
22/* Commands */
23#define FM93C56A_READ 0x2
24#define FM93C56A_WEN 0x0
25#define FM93C56A_WRITE 0x1
26#define FM93C56A_WRITE_ALL 0x0
27#define FM93C56A_WDS 0x0
28#define FM93C56A_ERASE 0x3
29#define FM93C56A_ERASE_ALL 0x0
30
31/* Command Extensions */
32#define FM93C56A_WEN_EXT 0x3
33#define FM93C56A_WRITE_ALL_EXT 0x1
34#define FM93C56A_WDS_EXT 0x0
35#define FM93C56A_ERASE_ALL_EXT 0x2
36
37/* Address Bits */
38#define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */
39#define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */
40#define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */
41
42/* Data Bits */
43#define FM93C56A_DATA_BITS_16 16
44#define FM93C56A_DATA_BITS_8 8
45
46/* Special Bits */
47#define FM93C56A_READ_DUMMY_BITS 1
48#define FM93C56A_READY 0
49#define FM93C56A_BUSY 1
50#define FM93C56A_CMD_BITS 2
51
52/* Auburn Bits */
53#define AUBURN_EEPROM_DI 0x8
54#define AUBURN_EEPROM_DI_0 0x0
55#define AUBURN_EEPROM_DI_1 0x8
56#define AUBURN_EEPROM_DO 0x4
57#define AUBURN_EEPROM_DO_0 0x0
58#define AUBURN_EEPROM_DO_1 0x4
59#define AUBURN_EEPROM_CS 0x2
60#define AUBURN_EEPROM_CS_0 0x0
61#define AUBURN_EEPROM_CS_1 0x2
62#define AUBURN_EEPROM_CLK_RISE 0x1
63#define AUBURN_EEPROM_CLK_FALL 0x0
64
65/* */
66/* EEPROM format */
67/* */
68struct bios_params {
69 uint16_t SpinUpDelay:1;
70 uint16_t BIOSDisable:1;
71 uint16_t MMAPEnable:1;
72 uint16_t BootEnable:1;
73 uint16_t Reserved0:12;
74 uint8_t bootID0:7;
75 uint8_t bootID0Valid:1;
76 uint8_t bootLUN0[8];
77 uint8_t bootID1:7;
78 uint8_t bootID1Valid:1;
79 uint8_t bootLUN1[8];
80 uint16_t MaxLunsPerTarget;
81 uint8_t Reserved1[10];
82};
83
84struct eeprom_port_cfg {
85
86 /* MTU MAC 0 */
87 u16 etherMtu_mac;
88
89 /* Flow Control MAC 0 */
90 u16 pauseThreshold_mac;
91 u16 resumeThreshold_mac;
92 u16 reserved[13];
93};
94
95struct eeprom_function_cfg {
96 u8 reserved[30];
97
98 /* MAC ADDR */
99 u8 macAddress[6];
100 u8 macAddressSecondary[6];
101 u16 subsysVendorId;
102 u16 subsysDeviceId;
103};
104
105struct eeprom_data {
106 union {
107 struct { /* isp4010 */
108 u8 asic_id[4]; /* x00 */
109 u8 version; /* x04 */
110 u8 reserved; /* x05 */
111 u16 board_id; /* x06 */
112#define EEPROM_BOARDID_ELDORADO 1
113#define EEPROM_BOARDID_PLACER 2
114
115#define EEPROM_SERIAL_NUM_SIZE 16
116 u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */
117
118 /* ExtHwConfig: */
119 /* Offset = 24bytes
120 *
121 * | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP | |
122 * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
123 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
124 */
125 u16 ext_hw_conf; /* x18 */
126 u8 mac0[6]; /* x1A */
127 u8 mac1[6]; /* x20 */
128 u8 mac2[6]; /* x26 */
129 u8 mac3[6]; /* x2C */
130 u16 etherMtu; /* x32 */
131 u16 macConfig; /* x34 */
132#define MAC_CONFIG_ENABLE_ANEG 0x0001
133#define MAC_CONFIG_ENABLE_PAUSE 0x0002
134 u16 phyConfig; /* x36 */
135#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
136#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
137 u16 topcat; /* x38 */
138#define TOPCAT_PRESENT 0x0100
139#define TOPCAT_MASK 0xFF00
140
141#define EEPROM_UNUSED_1_SIZE 2
142 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
143 u16 bufletSize; /* x3C */
144 u16 bufletCount; /* x3E */
145 u16 bufletPauseThreshold; /* x40 */
146 u16 tcpWindowThreshold50; /* x42 */
147 u16 tcpWindowThreshold25; /* x44 */
148 u16 tcpWindowThreshold0; /* x46 */
149 u16 ipHashTableBaseHi; /* x48 */
150 u16 ipHashTableBaseLo; /* x4A */
151 u16 ipHashTableSize; /* x4C */
152 u16 tcpHashTableBaseHi; /* x4E */
153 u16 tcpHashTableBaseLo; /* x50 */
154 u16 tcpHashTableSize; /* x52 */
155 u16 ncbTableBaseHi; /* x54 */
156 u16 ncbTableBaseLo; /* x56 */
157 u16 ncbTableSize; /* x58 */
158 u16 drbTableBaseHi; /* x5A */
159 u16 drbTableBaseLo; /* x5C */
160 u16 drbTableSize; /* x5E */
161
162#define EEPROM_UNUSED_2_SIZE 4
163 u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */
164 u16 ipReassemblyTimeout; /* x64 */
165 u16 tcpMaxWindowSizeHi; /* x66 */
166 u16 tcpMaxWindowSizeLo; /* x68 */
167 u32 net_ip_addr0; /* x6A Added for TOE
168 * functionality. */
169 u32 net_ip_addr1; /* x6E */
170 u32 scsi_ip_addr0; /* x72 */
171 u32 scsi_ip_addr1; /* x76 */
172#define EEPROM_UNUSED_3_SIZE 128 /* changed from 144 to account
173 * for ip addresses */
174 u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */
175 u16 subsysVendorId_f0; /* xFA */
176 u16 subsysDeviceId_f0; /* xFC */
177
178 /* Address = 0x7F */
179#define FM93C56A_SIGNATURE 0x9356
180#define FM93C66A_SIGNATURE 0x9366
181 u16 signature; /* xFE */
182
183#define EEPROM_UNUSED_4_SIZE 250
184 u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */
185 u16 subsysVendorId_f1; /* x1FA */
186 u16 subsysDeviceId_f1; /* x1FC */
187 u16 checksum; /* x1FE */
188 } __attribute__ ((packed)) isp4010;
189 struct { /* isp4022 */
190 u8 asicId[4]; /* x00 */
191 u8 version; /* x04 */
192 u8 reserved_5; /* x05 */
193 u16 boardId; /* x06 */
194 u8 boardIdStr[16]; /* x08 */
195 u8 serialNumber[16]; /* x18 */
196
197 /* External Hardware Configuration */
198 u16 ext_hw_conf; /* x28 */
199
200 /* MAC 0 CONFIGURATION */
201 struct eeprom_port_cfg macCfg_port0; /* x2A */
202
203 /* MAC 1 CONFIGURATION */
204 struct eeprom_port_cfg macCfg_port1; /* x4A */
205
206 /* DDR SDRAM Configuration */
207 u16 bufletSize; /* x6A */
208 u16 bufletCount; /* x6C */
209 u16 tcpWindowThreshold50; /* x6E */
210 u16 tcpWindowThreshold25; /* x70 */
211 u16 tcpWindowThreshold0; /* x72 */
212 u16 ipHashTableBaseHi; /* x74 */
213 u16 ipHashTableBaseLo; /* x76 */
214 u16 ipHashTableSize; /* x78 */
215 u16 tcpHashTableBaseHi; /* x7A */
216 u16 tcpHashTableBaseLo; /* x7C */
217 u16 tcpHashTableSize; /* x7E */
218 u16 ncbTableBaseHi; /* x80 */
219 u16 ncbTableBaseLo; /* x82 */
220 u16 ncbTableSize; /* x84 */
221 u16 drbTableBaseHi; /* x86 */
222 u16 drbTableBaseLo; /* x88 */
223 u16 drbTableSize; /* x8A */
224 u16 reserved_142[4]; /* x8C */
225
226 /* TCP/IP Parameters */
227 u16 ipReassemblyTimeout; /* x94 */
228 u16 tcpMaxWindowSize; /* x96 */
229 u16 ipSecurity; /* x98 */
230 u8 reserved_156[294]; /* x9A */
231 u16 qDebug[8]; /* QLOGIC USE ONLY x1C0 */
232 struct eeprom_function_cfg funcCfg_fn0; /* x1D0 */
233 u16 reserved_510; /* x1FE */
234
235 /* Address = 512 */
236 u8 oemSpace[432]; /* x200 */
237 struct bios_params sBIOSParams_fn1; /* x3B0 */
238 struct eeprom_function_cfg funcCfg_fn1; /* x3D0 */
239 u16 reserved_1022; /* x3FE */
240
241 /* Address = 1024 */
242 u8 reserved_1024[464]; /* x400 */
243 struct eeprom_function_cfg funcCfg_fn2; /* x5D0 */
244 u16 reserved_1534; /* x5FE */
245
246 /* Address = 1536 */
247 u8 reserved_1536[432]; /* x600 */
248 struct bios_params sBIOSParams_fn3; /* x7B0 */
249 struct eeprom_function_cfg funcCfg_fn3; /* x7D0 */
250 u16 checksum; /* x7FE */
251 } __attribute__ ((packed)) isp4022;
252 };
253};
254
255
256#endif /* _QL4XNVRM_H_ */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
new file mode 100644
index 00000000000..5036ebf013a
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -0,0 +1,1755 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#include <linux/moduleparam.h>
8
9#include <scsi/scsi_tcq.h>
10#include <scsi/scsicam.h>
11
12#include "ql4_def.h"
13
14/*
15 * Driver version
16 */
17char qla4xxx_version_str[40];
18
19/*
20 * SRB allocation cache
21 */
22static kmem_cache_t *srb_cachep;
23
24/*
25 * Module parameter information and variables
26 */
27int ql4xdiscoverywait = 60;
28module_param(ql4xdiscoverywait, int, S_IRUGO | S_IRUSR);
29MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time");
30int ql4xdontresethba = 0;
31module_param(ql4xdontresethba, int, S_IRUGO | S_IRUSR);
32MODULE_PARM_DESC(ql4xdontresethba,
33 "Dont reset the HBA when the driver gets 0x8002 AEN "
34 " default it will reset hba :0"
35 " set to 1 to avoid resetting HBA");
36
37int extended_error_logging = 0; /* 0 = off, 1 = log errors */
38module_param(extended_error_logging, int, S_IRUGO | S_IRUSR);
39MODULE_PARM_DESC(extended_error_logging,
40 "Option to enable extended error logging, "
41 "Default is 0 - no logging, 1 - debug logging");
42
43/*
44 * SCSI host template entry points
45 */
46
47void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
48
49/*
50 * iSCSI template entry points
51 */
52static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
53 uint32_t enable, struct sockaddr *dst_addr);
54static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
55 enum iscsi_param param, char *buf);
56static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
57 enum iscsi_param param, char *buf);
58static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
59static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
60static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
61
62/*
63 * SCSI host template entry points
64 */
65static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
66 void (*done) (struct scsi_cmnd *));
67static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
68static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
69static int qla4xxx_slave_alloc(struct scsi_device *device);
70static int qla4xxx_slave_configure(struct scsi_device *device);
71static void qla4xxx_slave_destroy(struct scsi_device *sdev);
72
73static struct scsi_host_template qla4xxx_driver_template = {
74 .module = THIS_MODULE,
75 .name = DRIVER_NAME,
76 .proc_name = DRIVER_NAME,
77 .queuecommand = qla4xxx_queuecommand,
78
79 .eh_device_reset_handler = qla4xxx_eh_device_reset,
80 .eh_host_reset_handler = qla4xxx_eh_host_reset,
81
82 .slave_configure = qla4xxx_slave_configure,
83 .slave_alloc = qla4xxx_slave_alloc,
84 .slave_destroy = qla4xxx_slave_destroy,
85
86 .this_id = -1,
87 .cmd_per_lun = 3,
88 .use_clustering = ENABLE_CLUSTERING,
89 .sg_tablesize = SG_ALL,
90
91 .max_sectors = 0xFFFF,
92};
93
94static struct iscsi_transport qla4xxx_iscsi_transport = {
95 .owner = THIS_MODULE,
96 .name = DRIVER_NAME,
97 .param_mask = ISCSI_CONN_PORT |
98 ISCSI_CONN_ADDRESS |
99 ISCSI_TARGET_NAME |
100 ISCSI_TPGT,
101 .sessiondata_size = sizeof(struct ddb_entry),
102 .host_template = &qla4xxx_driver_template,
103
104 .tgt_dscvr = qla4xxx_tgt_dscvr,
105 .get_conn_param = qla4xxx_conn_get_param,
106 .get_session_param = qla4xxx_sess_get_param,
107 .start_conn = qla4xxx_conn_start,
108 .stop_conn = qla4xxx_conn_stop,
109 .session_recovery_timedout = qla4xxx_recovery_timedout,
110};
111
112static struct scsi_transport_template *qla4xxx_scsi_transport;
113
114static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
115{
116 struct ddb_entry *ddb_entry = session->dd_data;
117 struct scsi_qla_host *ha = ddb_entry->ha;
118
119 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) "
120 "secs exhausted, marking device DEAD.\n", ha->host_no,
121 __func__, ddb_entry->fw_ddb_index,
122 ha->port_down_retry_count));
123
124 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
125
126 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
127 "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
128 queue_work(ha->dpc_thread, &ha->dpc_work);
129}
130
131static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
132{
133 struct iscsi_cls_session *session;
134 struct ddb_entry *ddb_entry;
135
136 session = iscsi_dev_to_session(conn->dev.parent);
137 ddb_entry = session->dd_data;
138
139 DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
140 ddb_entry->ha->host_no, __func__,
141 ddb_entry->fw_ddb_index));
142 iscsi_unblock_session(session);
143 return 0;
144}
145
146static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
147{
148 struct iscsi_cls_session *session;
149 struct ddb_entry *ddb_entry;
150
151 session = iscsi_dev_to_session(conn->dev.parent);
152 ddb_entry = session->dd_data;
153
154 DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
155 ddb_entry->ha->host_no, __func__,
156 ddb_entry->fw_ddb_index));
157 if (flag == STOP_CONN_RECOVER)
158 iscsi_block_session(session);
159 else
160 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
161}
162
163static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
164 enum iscsi_param param, char *buf)
165{
166 struct ddb_entry *ddb_entry = sess->dd_data;
167 int len;
168
169 switch (param) {
170 case ISCSI_PARAM_TARGET_NAME:
171 len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
172 ddb_entry->iscsi_name);
173 break;
174 case ISCSI_PARAM_TPGT:
175 len = sprintf(buf, "%u\n", ddb_entry->tpgt);
176 break;
177 default:
178 return -ENOSYS;
179 }
180
181 return len;
182}
183
184static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
185 enum iscsi_param param, char *buf)
186{
187 struct iscsi_cls_session *session;
188 struct ddb_entry *ddb_entry;
189 int len;
190
191 session = iscsi_dev_to_session(conn->dev.parent);
192 ddb_entry = session->dd_data;
193
194 switch (param) {
195 case ISCSI_PARAM_CONN_PORT:
196 len = sprintf(buf, "%hu\n", ddb_entry->port);
197 break;
198 case ISCSI_PARAM_CONN_ADDRESS:
199 /* TODO: what are the ipv6 bits */
200 len = sprintf(buf, "%u.%u.%u.%u\n",
201 NIPQUAD(ddb_entry->ip_addr));
202 break;
203 default:
204 return -ENOSYS;
205 }
206
207 return len;
208}
209
210static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
211 uint32_t enable, struct sockaddr *dst_addr)
212{
213 struct scsi_qla_host *ha;
214 struct Scsi_Host *shost;
215 struct sockaddr_in *addr;
216 struct sockaddr_in6 *addr6;
217 int ret = 0;
218
219 shost = scsi_host_lookup(host_no);
220 if (IS_ERR(shost)) {
221 printk(KERN_ERR "Could not find host no %u\n", host_no);
222 return -ENODEV;
223 }
224
225 ha = (struct scsi_qla_host *) shost->hostdata;
226
227 switch (type) {
228 case ISCSI_TGT_DSCVR_SEND_TARGETS:
229 if (dst_addr->sa_family == AF_INET) {
230 addr = (struct sockaddr_in *)dst_addr;
231 if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
232 addr->sin_port) != QLA_SUCCESS)
233 ret = -EIO;
234 } else if (dst_addr->sa_family == AF_INET6) {
235 /*
236 * TODO: fix qla4xxx_send_tgts
237 */
238 addr6 = (struct sockaddr_in6 *)dst_addr;
239 if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
240 addr6->sin6_port) != QLA_SUCCESS)
241 ret = -EIO;
242 } else
243 ret = -ENOSYS;
244 break;
245 default:
246 ret = -ENOSYS;
247 }
248
249 scsi_host_put(shost);
250 return ret;
251}
252
253void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
254{
255 if (!ddb_entry->sess)
256 return;
257
258 if (ddb_entry->conn) {
259 iscsi_if_destroy_session_done(ddb_entry->conn);
260 iscsi_destroy_conn(ddb_entry->conn);
261 iscsi_remove_session(ddb_entry->sess);
262 }
263 iscsi_free_session(ddb_entry->sess);
264}
265
266int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
267{
268 int err;
269
270 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
271 if (err) {
272 DEBUG2(printk(KERN_ERR "Could not add session.\n"));
273 return err;
274 }
275
276 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
277 if (!ddb_entry->conn) {
278 iscsi_remove_session(ddb_entry->sess);
279 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
280 return -ENOMEM;
281 }
282
283 ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
284 iscsi_if_create_session_done(ddb_entry->conn);
285 return 0;
286}
287
288struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
289{
290 struct ddb_entry *ddb_entry;
291 struct iscsi_cls_session *sess;
292
293 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
294 if (!sess)
295 return NULL;
296
297 ddb_entry = sess->dd_data;
298 memset(ddb_entry, 0, sizeof(*ddb_entry));
299 ddb_entry->ha = ha;
300 ddb_entry->sess = sess;
301 return ddb_entry;
302}
303
304/*
305 * Timer routines
306 */
307
308static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
309 unsigned long interval)
310{
311 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
312 __func__, ha->host->host_no));
313 init_timer(&ha->timer);
314 ha->timer.expires = jiffies + interval * HZ;
315 ha->timer.data = (unsigned long)ha;
316 ha->timer.function = (void (*)(unsigned long))func;
317 add_timer(&ha->timer);
318 ha->timer_active = 1;
319}
320
321static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
322{
323 del_timer_sync(&ha->timer);
324 ha->timer_active = 0;
325}
326
327/***
328 * qla4xxx_mark_device_missing - mark a device as missing.
329 * @ha: Pointer to host adapter structure.
330 * @ddb_entry: Pointer to device database entry
331 *
332 * This routine marks a device missing and resets the relogin retry count.
333 **/
334void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
335 struct ddb_entry *ddb_entry)
336{
337 atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
338 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
339 ha->host_no, ddb_entry->bus, ddb_entry->target,
340 ddb_entry->fw_ddb_index));
341 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
342}
343
344static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
345 struct ddb_entry *ddb_entry,
346 struct scsi_cmnd *cmd,
347 void (*done)(struct scsi_cmnd *))
348{
349 struct srb *srb;
350
351 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
352 if (!srb)
353 return srb;
354
355 atomic_set(&srb->ref_count, 1);
356 srb->ha = ha;
357 srb->ddb = ddb_entry;
358 srb->cmd = cmd;
359 srb->flags = 0;
360 cmd->SCp.ptr = (void *)srb;
361 cmd->scsi_done = done;
362
363 return srb;
364}
365
366static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
367{
368 struct scsi_cmnd *cmd = srb->cmd;
369
370 if (srb->flags & SRB_DMA_VALID) {
371 if (cmd->use_sg) {
372 pci_unmap_sg(ha->pdev, cmd->request_buffer,
373 cmd->use_sg, cmd->sc_data_direction);
374 } else if (cmd->request_bufflen) {
375 pci_unmap_single(ha->pdev, srb->dma_handle,
376 cmd->request_bufflen,
377 cmd->sc_data_direction);
378 }
379 srb->flags &= ~SRB_DMA_VALID;
380 }
381 cmd->SCp.ptr = NULL;
382}
383
384void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
385{
386 struct scsi_cmnd *cmd = srb->cmd;
387
388 qla4xxx_srb_free_dma(ha, srb);
389
390 mempool_free(srb, ha->srb_mempool);
391
392 cmd->scsi_done(cmd);
393}
394
395/**
396 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
397 * @cmd: Pointer to Linux's SCSI command structure
 398 * @done: Function that the driver calls to notify the SCSI mid-layer
399 * that the command has been processed.
400 *
401 * Remarks:
402 * This routine is invoked by Linux to send a SCSI command to the driver.
403 * The mid-level driver tries to ensure that queuecommand never gets
404 * invoked concurrently with itself or the interrupt handler (although
405 * the interrupt handler may call this routine as part of request-
 406 * completion handling). Unfortunately, it sometimes calls the scheduler
 407 * in interrupt context, which is a big no-no.
408 **/
409static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
410 void (*done)(struct scsi_cmnd *))
411{
412 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
413 struct ddb_entry *ddb_entry = cmd->device->hostdata;
414 struct srb *srb;
415 int rval;
416
417 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
418 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
419 cmd->result = DID_NO_CONNECT << 16;
420 goto qc_fail_command;
421 }
422 goto qc_host_busy;
423 }
424
425 spin_unlock_irq(ha->host->host_lock);
426
427 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
428 if (!srb)
429 goto qc_host_busy_lock;
430
431 rval = qla4xxx_send_command_to_isp(ha, srb);
432 if (rval != QLA_SUCCESS)
433 goto qc_host_busy_free_sp;
434
435 spin_lock_irq(ha->host->host_lock);
436 return 0;
437
438qc_host_busy_free_sp:
439 qla4xxx_srb_free_dma(ha, srb);
440 mempool_free(srb, ha->srb_mempool);
441
442qc_host_busy_lock:
443 spin_lock_irq(ha->host->host_lock);
444
445qc_host_busy:
446 return SCSI_MLQUEUE_HOST_BUSY;
447
448qc_fail_command:
449 done(cmd);
450
451 return 0;
452}
453
454/**
455 * qla4xxx_mem_free - frees memory allocated to adapter
456 * @ha: Pointer to host adapter structure.
457 *
458 * Frees memory previously allocated by qla4xxx_mem_alloc
459 **/
460static void qla4xxx_mem_free(struct scsi_qla_host *ha)
461{
462 if (ha->queues)
463 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
464 ha->queues_dma);
465
466 ha->queues_len = 0;
467 ha->queues = NULL;
468 ha->queues_dma = 0;
469 ha->request_ring = NULL;
470 ha->request_dma = 0;
471 ha->response_ring = NULL;
472 ha->response_dma = 0;
473 ha->shadow_regs = NULL;
474 ha->shadow_regs_dma = 0;
475
476 /* Free srb pool. */
477 if (ha->srb_mempool)
478 mempool_destroy(ha->srb_mempool);
479
480 ha->srb_mempool = NULL;
481
482 /* release io space registers */
483 if (ha->reg)
484 iounmap(ha->reg);
485 pci_release_regions(ha->pdev);
486}
487
488/**
489 * qla4xxx_mem_alloc - allocates memory for use by adapter.
490 * @ha: Pointer to host adapter structure
491 *
492 * Allocates DMA memory for request and response queues. Also allocates memory
493 * for srbs.
494 **/
495static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
496{
497 unsigned long align;
498
499 /* Allocate contiguous block of DMA memory for queues. */
500 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
501 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
502 sizeof(struct shadow_regs) +
503 MEM_ALIGN_VALUE +
504 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
505 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
506 &ha->queues_dma, GFP_KERNEL);
507 if (ha->queues == NULL) {
508 dev_warn(&ha->pdev->dev,
509 "Memory Allocation failed - queues.\n");
510
511 goto mem_alloc_error_exit;
512 }
513 memset(ha->queues, 0, ha->queues_len);
514
515 /*
516 * As per RISC alignment requirements -- the bus-address must be a
517 * multiple of the request-ring size (in bytes).
518 */
519 align = 0;
520 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
521 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
522 (MEM_ALIGN_VALUE - 1));
523
524 /* Update request and response queue pointers. */
525 ha->request_dma = ha->queues_dma + align;
526 ha->request_ring = (struct queue_entry *) (ha->queues + align);
527 ha->response_dma = ha->queues_dma + align +
528 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
529 ha->response_ring = (struct queue_entry *) (ha->queues + align +
530 (REQUEST_QUEUE_DEPTH *
531 QUEUE_SIZE));
532 ha->shadow_regs_dma = ha->queues_dma + align +
533 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
534 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
535 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
536 (REQUEST_QUEUE_DEPTH *
537 QUEUE_SIZE) +
538 (RESPONSE_QUEUE_DEPTH *
539 QUEUE_SIZE));
540
541 /* Allocate memory for srb pool. */
542 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
543 mempool_free_slab, srb_cachep);
544 if (ha->srb_mempool == NULL) {
545 dev_warn(&ha->pdev->dev,
546 "Memory Allocation failed - SRB Pool.\n");
547
548 goto mem_alloc_error_exit;
549 }
550
551 return QLA_SUCCESS;
552
553mem_alloc_error_exit:
554 qla4xxx_mem_free(ha);
555 return QLA_ERROR;
556}
557
558/**
559 * qla4xxx_timer - checks every second for work to do.
560 * @ha: Pointer to host adapter structure.
561 **/
562static void qla4xxx_timer(struct scsi_qla_host *ha)
563{
564 struct ddb_entry *ddb_entry, *dtemp;
565 int start_dpc = 0;
566
567 /* Search for relogin's to time-out and port down retry. */
568 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
569 /* Count down time between sending relogins */
570 if (adapter_up(ha) &&
571 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
572 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
573 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
574 INVALID_ENTRY) {
575 if (atomic_read(&ddb_entry->retry_relogin_timer)
576 == 0) {
577 atomic_set(&ddb_entry->
578 retry_relogin_timer,
579 INVALID_ENTRY);
580 set_bit(DPC_RELOGIN_DEVICE,
581 &ha->dpc_flags);
582 set_bit(DF_RELOGIN, &ddb_entry->flags);
583 DEBUG2(printk("scsi%ld: %s: index [%d]"
584 " login device\n",
585 ha->host_no, __func__,
586 ddb_entry->fw_ddb_index));
587 } else
588 atomic_dec(&ddb_entry->
589 retry_relogin_timer);
590 }
591 }
592
593 /* Wait for relogin to timeout */
594 if (atomic_read(&ddb_entry->relogin_timer) &&
595 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
596 /*
597 * If the relogin times out and the device is
598 * still NOT ONLINE then try and relogin again.
599 */
600 if (atomic_read(&ddb_entry->state) !=
601 DDB_STATE_ONLINE &&
602 ddb_entry->fw_ddb_device_state ==
603 DDB_DS_SESSION_FAILED) {
604 /* Reset retry relogin timer */
605 atomic_inc(&ddb_entry->relogin_retry_count);
606 DEBUG2(printk("scsi%ld: index[%d] relogin"
607 " timed out-retrying"
608 " relogin (%d)\n",
609 ha->host_no,
610 ddb_entry->fw_ddb_index,
611 atomic_read(&ddb_entry->
612 relogin_retry_count))
613 );
614 start_dpc++;
615 DEBUG(printk("scsi%ld:%d:%d: index [%d] "
616 "initate relogin after"
617 " %d seconds\n",
618 ha->host_no, ddb_entry->bus,
619 ddb_entry->target,
620 ddb_entry->fw_ddb_index,
621 ddb_entry->default_time2wait + 4)
622 );
623
624 atomic_set(&ddb_entry->retry_relogin_timer,
625 ddb_entry->default_time2wait + 4);
626 }
627 }
628 }
629
630 /* Check for heartbeat interval. */
631 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
632 ha->heartbeat_interval != 0) {
633 ha->seconds_since_last_heartbeat++;
634 if (ha->seconds_since_last_heartbeat >
635 ha->heartbeat_interval + 2)
636 set_bit(DPC_RESET_HA, &ha->dpc_flags);
637 }
638
639
640 /* Wakeup the dpc routine for this adapter, if needed. */
641 if ((start_dpc ||
642 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
643 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
644 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
645 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
646 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
647 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
648 test_bit(DPC_AEN, &ha->dpc_flags)) &&
649 ha->dpc_thread) {
650 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
651 " - dpc flags = 0x%lx\n",
652 ha->host_no, __func__, ha->dpc_flags));
653 queue_work(ha->dpc_thread, &ha->dpc_work);
654 }
655
656 /* Reschedule timer thread to call us back in one second */
657 mod_timer(&ha->timer, jiffies + HZ);
658
659 DEBUG2(ha->seconds_since_last_intr++);
660}
661
662/**
663 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
664 * @ha: Pointer to host adapter structure.
665 *
666 * This routine stalls the driver until all outstanding commands are returned.
667 * Caller must release the Hardware Lock prior to calling this routine.
668 **/
669static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
670{
671 uint32_t index = 0;
672 int stat = QLA_SUCCESS;
673 unsigned long flags;
674 struct scsi_cmnd *cmd;
675 int wait_cnt = WAIT_CMD_TOV; /*
676 * Initialized for 30 seconds as we
 677					 * expect all commands to be returned
678 * ASAP.
679 */
680
681 while (wait_cnt) {
682 spin_lock_irqsave(&ha->hardware_lock, flags);
683 /* Find a command that hasn't completed. */
684 for (index = 0; index < ha->host->can_queue; index++) {
685 cmd = scsi_host_find_tag(ha->host, index);
686 if (cmd != NULL)
687 break;
688 }
689 spin_unlock_irqrestore(&ha->hardware_lock, flags);
690
691 /* If No Commands are pending, wait is complete */
692 if (index == ha->host->can_queue) {
693 break;
694 }
695
696 /* If we timed out on waiting for commands to come back
697 * return ERROR.
698 */
699 wait_cnt--;
700 if (wait_cnt == 0)
701 stat = QLA_ERROR;
702 else {
703 msleep(1000);
704 }
705 } /* End of While (wait_cnt) */
706
707 return stat;
708}
709
710/**
711 * qla4010_soft_reset - performs soft reset.
712 * @ha: Pointer to host adapter structure.
713 **/
714static int qla4010_soft_reset(struct scsi_qla_host *ha)
715{
716 uint32_t max_wait_time;
717 unsigned long flags = 0;
718 int status = QLA_ERROR;
719 uint32_t ctrl_status;
720
721 spin_lock_irqsave(&ha->hardware_lock, flags);
722
723 /*
724 * If the SCSI Reset Interrupt bit is set, clear it.
725 * Otherwise, the Soft Reset won't work.
726 */
727 ctrl_status = readw(&ha->reg->ctrl_status);
728 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
729 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
730
731 /* Issue Soft Reset */
732 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
733 readl(&ha->reg->ctrl_status);
734
735 spin_unlock_irqrestore(&ha->hardware_lock, flags);
736
737 /* Wait until the Network Reset Intr bit is cleared */
738 max_wait_time = RESET_INTR_TOV;
739 do {
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741 ctrl_status = readw(&ha->reg->ctrl_status);
742 spin_unlock_irqrestore(&ha->hardware_lock, flags);
743
744 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
745 break;
746
747 msleep(1000);
748 } while ((--max_wait_time));
749
750 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
751 DEBUG2(printk(KERN_WARNING
752 "scsi%ld: Network Reset Intr not cleared by "
753 "Network function, clearing it now!\n",
754 ha->host_no));
755 spin_lock_irqsave(&ha->hardware_lock, flags);
756 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
757 readl(&ha->reg->ctrl_status);
758 spin_unlock_irqrestore(&ha->hardware_lock, flags);
759 }
760
761 /* Wait until the firmware tells us the Soft Reset is done */
762 max_wait_time = SOFT_RESET_TOV;
763 do {
764 spin_lock_irqsave(&ha->hardware_lock, flags);
765 ctrl_status = readw(&ha->reg->ctrl_status);
766 spin_unlock_irqrestore(&ha->hardware_lock, flags);
767
768 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
769 status = QLA_SUCCESS;
770 break;
771 }
772
773 msleep(1000);
774 } while ((--max_wait_time));
775
776 /*
777 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
778 * after the soft reset has taken place.
779 */
780 spin_lock_irqsave(&ha->hardware_lock, flags);
781 ctrl_status = readw(&ha->reg->ctrl_status);
782 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
783 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
784 readl(&ha->reg->ctrl_status);
785 }
786 spin_unlock_irqrestore(&ha->hardware_lock, flags);
787
 788	/* If the soft reset fails then most probably the BIOS on the
 789	 * other function is also enabled.
 790	 * Since the initialization is sequential, the other function
 791	 * won't be able to acknowledge the soft reset.
 792	 * Issue a force soft reset to work around this scenario.
 793	 */
794 if (max_wait_time == 0) {
795 /* Issue Force Soft Reset */
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
798 readl(&ha->reg->ctrl_status);
799 spin_unlock_irqrestore(&ha->hardware_lock, flags);
800 /* Wait until the firmware tells us the Soft Reset is done */
801 max_wait_time = SOFT_RESET_TOV;
802 do {
803 spin_lock_irqsave(&ha->hardware_lock, flags);
804 ctrl_status = readw(&ha->reg->ctrl_status);
805 spin_unlock_irqrestore(&ha->hardware_lock, flags);
806
807 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
808 status = QLA_SUCCESS;
809 break;
810 }
811
812 msleep(1000);
813 } while ((--max_wait_time));
814 }
815
816 return status;
817}
818
819/**
820 * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
821 * @ha: Pointer to host adapter structure.
822 **/
823static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
824{
825 unsigned long flags;
826
827 ql4xxx_lock_nvram(ha);
828 spin_lock_irqsave(&ha->hardware_lock, flags);
829 writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
830 readl(isp_gp_out(ha));
831 mdelay(1);
832
833 writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
834 readl(isp_gp_out(ha));
835 spin_unlock_irqrestore(&ha->hardware_lock, flags);
836 mdelay(2523);
837
838 ql4xxx_unlock_nvram(ha);
839 return QLA_SUCCESS;
840}
841
842/**
843 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
844 * @ha: Pointer to host adapter structure.
845 *
846 * This routine is called just prior to a HARD RESET to return all
847 * outstanding commands back to the Operating System.
848 * Caller should make sure that the following locks are released
 849 * before calling this routine: Hardware lock and io_request_lock.
850 **/
851static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
852{
853 struct srb *srb;
854 int i;
855 unsigned long flags;
856
857 spin_lock_irqsave(&ha->hardware_lock, flags);
858 for (i = 0; i < ha->host->can_queue; i++) {
859 srb = qla4xxx_del_from_active_array(ha, i);
860 if (srb != NULL) {
861 srb->cmd->result = DID_RESET << 16;
862 qla4xxx_srb_compl(ha, srb);
863 }
864 }
865 spin_unlock_irqrestore(&ha->hardware_lock, flags);
866
867}
868
869/**
870 * qla4xxx_hard_reset - performs HBA Hard Reset
871 * @ha: Pointer to host adapter structure.
872 **/
873static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
874{
875 /* The QLA4010 really doesn't have an equivalent to a hard reset */
876 qla4xxx_flush_active_srbs(ha);
877 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
878 int status = QLA_ERROR;
879
880 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
881 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
882 (qla4010_soft_reset(ha) == QLA_SUCCESS))
883 status = QLA_SUCCESS;
884 return status;
885 } else
886 return qla4010_soft_reset(ha);
887}
888
889/**
890 * qla4xxx_recover_adapter - recovers adapter after a fatal error
891 * @ha: Pointer to host adapter structure.
892 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
893 * after adapter recovery has completed.
894 * 0=preserve ddb list, 1=destroy and rebuild ddb list
895 **/
896static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
897 uint8_t renew_ddb_list)
898{
899 int status;
900
901 /* Stall incoming I/O until we are done */
902 clear_bit(AF_ONLINE, &ha->flags);
903 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
904 __func__));
905
906 /* Wait for outstanding commands to complete.
907 * Stalls the driver for max 30 secs
908 */
909 status = qla4xxx_cmd_wait(ha);
910
911 qla4xxx_disable_intrs(ha);
912
913 /* Flush any pending ddb changed AENs */
914 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
915
916 /* Reset the firmware. If successful, function
917 * returns with ISP interrupts enabled.
918 */
919 if (status == QLA_SUCCESS) {
920 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
921 ha->host_no, __func__));
922 status = qla4xxx_soft_reset(ha);
923 }
924 /* FIXMEkaren: Do we want to keep interrupts enabled and process
925 AENs after soft reset */
926
927 /* If firmware (SOFT) reset failed, or if all outstanding
928 * commands have not returned, then do a HARD reset.
929 */
930 if (status == QLA_ERROR) {
931 DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
932 ha->host_no, __func__));
933 status = qla4xxx_hard_reset(ha);
934 }
935
936 /* Flush any pending ddb changed AENs */
937 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
938
939 /* Re-initialize firmware. If successful, function returns
940 * with ISP interrupts enabled */
941 if (status == QLA_SUCCESS) {
942 DEBUG2(printk("scsi%ld: %s - Initializing adapter..\n",
943 ha->host_no, __func__));
944
945 /* If successful, AF_ONLINE flag set in
946 * qla4xxx_initialize_adapter */
947 status = qla4xxx_initialize_adapter(ha, renew_ddb_list);
948 }
949
950 /* Failed adapter initialization?
951 * Retry reset_ha only if invoked via DPC (DPC_RESET_HA) */
952 if ((test_bit(AF_ONLINE, &ha->flags) == 0) &&
953 (test_bit(DPC_RESET_HA, &ha->dpc_flags))) {
954 /* Adapter initialization failed, see if we can retry
955 * resetting the ha */
956 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
957 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
958 DEBUG2(printk("scsi%ld: recover adapter - retrying "
959 "(%d) more times\n", ha->host_no,
960 ha->retry_reset_ha_cnt));
961 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
962 status = QLA_ERROR;
963 } else {
964 if (ha->retry_reset_ha_cnt > 0) {
965 /* Schedule another Reset HA--DPC will retry */
966 ha->retry_reset_ha_cnt--;
967 DEBUG2(printk("scsi%ld: recover adapter - "
968 "retry remaining %d\n",
969 ha->host_no,
970 ha->retry_reset_ha_cnt));
971 status = QLA_ERROR;
972 }
973
974 if (ha->retry_reset_ha_cnt == 0) {
975 /* Recover adapter retries have been exhausted.
976 * Adapter DEAD */
977 DEBUG2(printk("scsi%ld: recover adapter "
978 "failed - board disabled\n",
979 ha->host_no));
980 qla4xxx_flush_active_srbs(ha);
981 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
982 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
983 clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST,
984 &ha->dpc_flags);
985 status = QLA_ERROR;
986 }
987 }
988 } else {
989 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
990 clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags);
991 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
992 }
993
994 ha->adapter_error_count++;
995
996 if (status == QLA_SUCCESS)
997 qla4xxx_enable_intrs(ha);
998
999 DEBUG2(printk("scsi%ld: recover adapter .. DONE\n", ha->host_no));
1000 return status;
1001}
1002
1003/**
1004 * qla4xxx_do_dpc - dpc routine
1005 * @data: in our case pointer to adapter structure
1006 *
 1007 * This routine is a task that is scheduled by the interrupt handler
 1008 * to perform the background processing for interrupts. We put it
 1009 * on a task queue that is consumed whenever the scheduler runs; that
 1010 * way you can do anything (e.g. put the process to sleep). In fact,
1011 * the mid-level tries to sleep when it reaches the driver threshold
1012 * "host->can_queue". This can cause a panic if we were in our interrupt code.
1013 **/
1014static void qla4xxx_do_dpc(void *data)
1015{
1016 struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
1017 struct ddb_entry *ddb_entry, *dtemp;
1018
1019 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
1020 ha->host_no, __func__));
1021
1022 DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
1023 ha->host_no, __func__, ha->flags));
1024 DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
1025 ha->host_no, __func__, ha->dpc_flags));
1026
1027 /* Initialization not yet finished. Don't do anything yet. */
1028 if (!test_bit(AF_INIT_DONE, &ha->flags))
1029 return;
1030
1031 if (adapter_up(ha) ||
1032 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1033 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1034 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
1035 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags))
1036 /*
1037 * dg 09/23 Never initialize ddb list
 1038			 * once we are up and running
1039 * qla4xxx_recover_adapter(ha,
1040 * REBUILD_DDB_LIST);
1041 */
1042 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1043
1044 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1045 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1046
1047 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1048 uint8_t wait_time = RESET_INTR_TOV;
1049 unsigned long flags = 0;
1050
1051 qla4xxx_flush_active_srbs(ha);
1052
1053 spin_lock_irqsave(&ha->hardware_lock, flags);
1054 while ((readw(&ha->reg->ctrl_status) &
1055 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
1056 if (--wait_time == 0)
1057 break;
1058
1059 spin_unlock_irqrestore(&ha->hardware_lock,
1060 flags);
1061
1062 msleep(1000);
1063
1064 spin_lock_irqsave(&ha->hardware_lock, flags);
1065 }
1066 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1067
1068 if (wait_time == 0)
1069 DEBUG2(printk("scsi%ld: %s: SR|FSR "
1070 "bit not cleared-- resetting\n",
1071 ha->host_no, __func__));
1072 }
1073 }
1074
1075 /* ---- process AEN? --- */
1076 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
1077 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
1078
1079 /* ---- Get DHCP IP Address? --- */
1080 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
1081 qla4xxx_get_dhcp_ip_address(ha);
1082
1083 /* ---- relogin device? --- */
1084 if (adapter_up(ha) &&
1085 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
1086 list_for_each_entry_safe(ddb_entry, dtemp,
1087 &ha->ddb_list, list) {
1088 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
1089 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
1090 qla4xxx_relogin_device(ha, ddb_entry);
1091
1092 /*
1093 * If mbx cmd times out there is no point
1094 * in continuing further.
1095 * With large no of targets this can hang
1096 * the system.
1097 */
1098 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1099 printk(KERN_WARNING "scsi%ld: %s: "
1100 "need to reset hba\n",
1101 ha->host_no, __func__);
1102 break;
1103 }
1104 }
1105 }
1106}
1107
1108/**
1109 * qla4xxx_free_adapter - release the adapter
1110 * @ha: pointer to adapter structure
1111 **/
1112static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1113{
1114
1115 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
1116 /* Turn-off interrupts on the card. */
1117 qla4xxx_disable_intrs(ha);
1118 }
1119
1120 /* Kill the kernel thread for this host */
1121 if (ha->dpc_thread)
1122 destroy_workqueue(ha->dpc_thread);
1123
1124 /* Issue Soft Reset to put firmware in unknown state */
1125 qla4xxx_soft_reset(ha);
1126
1127 /* Remove timer thread, if present */
1128 if (ha->timer_active)
1129 qla4xxx_stop_timer(ha);
1130
1131 /* free extra memory */
1132 qla4xxx_mem_free(ha);
1133
1134 /* Detach interrupts */
1135 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1136 free_irq(ha->pdev->irq, ha);
1137
1138 pci_disable_device(ha->pdev);
1139
1140}
1141
1142/***
1143 * qla4xxx_iospace_config - maps registers
1144 * @ha: pointer to adapter structure
1145 *
 1146 * This routine maps the HBA's registers from the PCI address space
1147 * into the kernel virtual address space for memory mapped i/o.
1148 **/
1149static int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1150{
1151 unsigned long pio, pio_len, pio_flags;
1152 unsigned long mmio, mmio_len, mmio_flags;
1153
1154 pio = pci_resource_start(ha->pdev, 0);
1155 pio_len = pci_resource_len(ha->pdev, 0);
1156 pio_flags = pci_resource_flags(ha->pdev, 0);
1157 if (pio_flags & IORESOURCE_IO) {
1158 if (pio_len < MIN_IOBASE_LEN) {
1159 dev_warn(&ha->pdev->dev,
1160 "Invalid PCI I/O region size\n");
1161 pio = 0;
1162 }
1163 } else {
1164 dev_warn(&ha->pdev->dev, "region #0 not a PIO resource\n");
1165 pio = 0;
1166 }
1167
1168 /* Use MMIO operations for all accesses. */
1169 mmio = pci_resource_start(ha->pdev, 1);
1170 mmio_len = pci_resource_len(ha->pdev, 1);
1171 mmio_flags = pci_resource_flags(ha->pdev, 1);
1172
1173 if (!(mmio_flags & IORESOURCE_MEM)) {
1174 dev_err(&ha->pdev->dev,
1175 "region #0 not an MMIO resource, aborting\n");
1176
1177 goto iospace_error_exit;
1178 }
1179 if (mmio_len < MIN_IOBASE_LEN) {
1180 dev_err(&ha->pdev->dev,
1181 "Invalid PCI mem region size, aborting\n");
1182 goto iospace_error_exit;
1183 }
1184
1185 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
1186 dev_warn(&ha->pdev->dev,
1187 "Failed to reserve PIO/MMIO regions\n");
1188
1189 goto iospace_error_exit;
1190 }
1191
1192 ha->pio_address = pio;
1193 ha->pio_length = pio_len;
1194 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
1195 if (!ha->reg) {
1196 dev_err(&ha->pdev->dev,
1197 "cannot remap MMIO, aborting\n");
1198
1199 goto iospace_error_exit;
1200 }
1201
1202 return 0;
1203
1204iospace_error_exit:
1205 return -ENOMEM;
1206}
1207
1208/**
1209 * qla4xxx_probe_adapter - callback function to probe HBA
1210 * @pdev: pointer to pci_dev structure
 1211 * @ent: pointer to pci_device_id entry
 1212 *
 1213 * This routine will probe for QLogic 4xxx iSCSI host adapters.
1214 * It returns zero if successful. It also initializes all data necessary for
1215 * the driver.
1216 **/
1217static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1218 const struct pci_device_id *ent)
1219{
1220 int ret = -ENODEV, status;
1221 struct Scsi_Host *host;
1222 struct scsi_qla_host *ha;
1223 struct ddb_entry *ddb_entry, *ddbtemp;
1224 uint8_t init_retry_count = 0;
1225 char buf[34];
1226
1227 if (pci_enable_device(pdev))
1228 return -1;
1229
1230 host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
1231 if (host == NULL) {
1232 printk(KERN_WARNING
1233 "qla4xxx: Couldn't allocate host from scsi layer!\n");
1234 goto probe_disable_device;
1235 }
1236
1237 /* Clear our data area */
1238 ha = (struct scsi_qla_host *) host->hostdata;
1239 memset(ha, 0, sizeof(*ha));
1240
1241 /* Save the information from PCI BIOS. */
1242 ha->pdev = pdev;
1243 ha->host = host;
1244 ha->host_no = host->host_no;
1245
1246 /* Configure PCI I/O space. */
1247 ret = qla4xxx_iospace_config(ha);
1248 if (ret)
1249 goto probe_failed;
1250
1251 dev_info(&ha->pdev->dev, "Found an ISP%04x, irq %d, iobase 0x%p\n",
1252 pdev->device, pdev->irq, ha->reg);
1253
1254 qla4xxx_config_dma_addressing(ha);
1255
1256 /* Initialize lists and spinlocks. */
1257 INIT_LIST_HEAD(&ha->ddb_list);
1258 INIT_LIST_HEAD(&ha->free_srb_q);
1259
1260 mutex_init(&ha->mbox_sem);
1261 init_waitqueue_head(&ha->mailbox_wait_queue);
1262
1263 spin_lock_init(&ha->hardware_lock);
1264 spin_lock_init(&ha->list_lock);
1265
1266 /* Allocate dma buffers */
1267 if (qla4xxx_mem_alloc(ha)) {
1268 dev_warn(&ha->pdev->dev,
1269 "[ERROR] Failed to allocate memory for adapter\n");
1270
1271 ret = -ENOMEM;
1272 goto probe_failed;
1273 }
1274
1275 /*
1276 * Initialize the Host adapter request/response queues and
1277 * firmware
1278 * NOTE: interrupts enabled upon successful completion
1279 */
1280 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1281 while (status == QLA_ERROR && init_retry_count++ < MAX_INIT_RETRIES) {
1282 DEBUG2(printk("scsi: %s: retrying adapter initialization "
1283 "(%d)\n", __func__, init_retry_count));
1284 qla4xxx_soft_reset(ha);
1285 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1286 }
1287 if (status == QLA_ERROR) {
1288 dev_warn(&ha->pdev->dev, "Failed to initialize adapter\n");
1289
1290 ret = -ENODEV;
1291 goto probe_failed;
1292 }
1293
1294 host->cmd_per_lun = 3;
1295 host->max_channel = 0;
1296 host->max_lun = MAX_LUNS - 1;
1297 host->max_id = MAX_TARGETS;
1298 host->max_cmd_len = IOCB_MAX_CDB_LEN;
1299 host->can_queue = MAX_SRBS ;
1300 host->transportt = qla4xxx_scsi_transport;
1301
1302 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
1303 if (ret) {
1304 dev_warn(&ha->pdev->dev, "scsi_init_shared_tag_map failed");
1305 goto probe_failed;
1306 }
1307
1308 /* Startup the kernel thread for this host adapter. */
1309 DEBUG2(printk("scsi: %s: Starting kernel thread for "
1310 "qla4xxx_dpc\n", __func__));
1311 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
1312 ha->dpc_thread = create_singlethread_workqueue(buf);
1313 if (!ha->dpc_thread) {
1314 dev_warn(&ha->pdev->dev, "Unable to start DPC thread!\n");
1315 ret = -ENODEV;
1316 goto probe_failed;
1317 }
1318 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
1319
1320 ret = request_irq(pdev->irq, qla4xxx_intr_handler,
1321 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
1322 if (ret) {
 1323		dev_warn(&ha->pdev->dev, "Failed to reserve interrupt %d;"
 1324			" already in use.\n", pdev->irq);
1325 goto probe_failed;
1326 }
1327 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1328 host->irq = pdev->irq;
1329 DEBUG(printk("scsi%d: irq %d attached\n", ha->host_no, ha->pdev->irq));
1330
1331 qla4xxx_enable_intrs(ha);
1332
1333 /* Start timer thread. */
1334 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
1335
1336 set_bit(AF_INIT_DONE, &ha->flags);
1337
1338 pci_set_drvdata(pdev, ha);
1339
1340 ret = scsi_add_host(host, &pdev->dev);
1341 if (ret)
1342 goto probe_failed;
1343
1344 /* Update transport device information for all devices. */
1345 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
1346 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
1347 if (qla4xxx_add_sess(ddb_entry))
1348 goto remove_host;
1349 }
1350
1351 printk(KERN_INFO
1352 " QLogic iSCSI HBA Driver version: %s\n"
1353 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
1354 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
1355 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
1356 ha->patch_number, ha->build_number);
1357
1358 return 0;
1359
1360remove_host:
1361 qla4xxx_free_ddb_list(ha);
1362 scsi_remove_host(host);
1363
1364probe_failed:
1365 qla4xxx_free_adapter(ha);
1366 scsi_host_put(ha->host);
1367
1368probe_disable_device:
1369 pci_disable_device(pdev);
1370
1371 return ret;
1372}
1373
1374/**
 1375 * qla4xxx_remove_adapter - callback function to remove adapter.
 1376 * @pdev: PCI device pointer
1377 **/
1378static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1379{
1380 struct scsi_qla_host *ha;
1381
1382 ha = pci_get_drvdata(pdev);
1383
1384 /* remove devs from iscsi_sessions to scsi_devices */
1385 qla4xxx_free_ddb_list(ha);
1386
1387 scsi_remove_host(ha->host);
1388
1389 qla4xxx_free_adapter(ha);
1390
1391 scsi_host_put(ha->host);
1392
1393 pci_set_drvdata(pdev, NULL);
1394}
1395
1396/**
1397 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
1398 * @ha: HA context
1399 *
 1400 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
 1401 * supported addressing method.
1402 */
1403void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
1404{
1405 int retval;
1406
1407 /* Update our PCI device dma_mask for full 64 bit mask */
1408 if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
1409 if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
1410 dev_dbg(&ha->pdev->dev,
1411 "Failed to set 64 bit PCI consistent mask; "
1412 "using 32 bit.\n");
1413 retval = pci_set_consistent_dma_mask(ha->pdev,
1414 DMA_32BIT_MASK);
1415 }
1416 } else
1417 retval = pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
1418}
1419
1420static int qla4xxx_slave_alloc(struct scsi_device *sdev)
1421{
1422 struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
1423 struct ddb_entry *ddb = sess->dd_data;
1424
1425 sdev->hostdata = ddb;
1426 sdev->tagged_supported = 1;
1427 scsi_activate_tcq(sdev, sdev->host->can_queue);
1428 return 0;
1429}
1430
1431static int qla4xxx_slave_configure(struct scsi_device *sdev)
1432{
1433 sdev->tagged_supported = 1;
1434 return 0;
1435}
1436
1437static void qla4xxx_slave_destroy(struct scsi_device *sdev)
1438{
1439 scsi_deactivate_tcq(sdev, 1);
1440}
1441
1442/**
1443 * qla4xxx_del_from_active_array - returns an active srb
1444 * @ha: Pointer to host adapter structure.
 1445 * @index: index into the active_array
1446 *
1447 * This routine removes and returns the srb at the specified index
1448 **/
1449struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
1450{
1451 struct srb *srb = NULL;
1452 struct scsi_cmnd *cmd;
1453
1454 if (!(cmd = scsi_host_find_tag(ha->host, index)))
1455 return srb;
1456
1457 if (!(srb = (struct srb *)cmd->host_scribble))
1458 return srb;
1459
1460 /* update counters */
1461 if (srb->flags & SRB_DMA_VALID) {
1462 ha->req_q_count += srb->iocb_cnt;
1463 ha->iocb_cnt -= srb->iocb_cnt;
1464 if (srb->cmd)
1465 srb->cmd->host_scribble = NULL;
1466 }
1467 return srb;
1468}
1469
1470/**
1471 * qla4xxx_soft_reset - performs a SOFT RESET of hba.
1472 * @ha: Pointer to host adapter structure.
1473 **/
1474int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1475{
1476
1477 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
1478 __func__));
1479 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
1480 int status = QLA_ERROR;
1481
1482 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
1483 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
1484 (qla4010_soft_reset(ha) == QLA_SUCCESS) )
1485 status = QLA_SUCCESS;
1486 return status;
1487 } else
1488 return qla4010_soft_reset(ha);
1489}
1490
1491/**
1492 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
 1493 * @ha: actual ha whose done queue will contain the command returned by firmware.
1494 * @cmd: Scsi Command to wait on.
1495 *
1496 * This routine waits for the command to be returned by the Firmware
1497 * for some max time.
1498 **/
1499static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
1500 struct scsi_cmnd *cmd)
1501{
1502 int done = 0;
1503 struct srb *rp;
1504 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
1505
1506 do {
1507 /* Checking to see if its returned to OS */
1508 rp = (struct srb *) cmd->SCp.ptr;
1509 if (rp == NULL) {
1510 done++;
1511 break;
1512 }
1513
1514 msleep(2000);
1515 } while (max_wait_time--);
1516
1517 return done;
1518}
1519
1520/**
1521 * qla4xxx_wait_for_hba_online - waits for HBA to come online
1522 * @ha: Pointer to host adapter structure
1523 **/
1524static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
1525{
1526 unsigned long wait_online;
1527
1528 wait_online = jiffies + (30 * HZ);
1529 while (time_before(jiffies, wait_online)) {
1530
1531 if (adapter_up(ha))
1532 return QLA_SUCCESS;
1533 else if (ha->retry_reset_ha_cnt == 0)
1534 return QLA_ERROR;
1535
1536 msleep(2000);
1537 }
1538
1539 return QLA_ERROR;
1540}
1541
1542/**
1543 * qla4xxx_eh_wait_for_active_target_commands - wait for active cmds to finish.
 1544 * @ha: pointer to the HBA
1545 * @t: target id
1546 * @l: lun id
1547 *
1548 * This function waits for all outstanding commands to a lun to complete. It
1549 * returns 0 if all pending commands are returned and 1 otherwise.
1550 **/
1551static int qla4xxx_eh_wait_for_active_target_commands(struct scsi_qla_host *ha,
1552 int t, int l)
1553{
1554 int cnt;
1555 int status = 0;
1556 struct scsi_cmnd *cmd;
1557
1558 /*
1559 * Waiting for all commands for the designated target in the active
1560 * array
1561 */
1562 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
1563 cmd = scsi_host_find_tag(ha->host, cnt);
1564 if (cmd && cmd->device->id == t && cmd->device->lun == l) {
1565 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
1566 status++;
1567 break;
1568 }
1569 }
1570 }
1571 return status;
1572}
1573
1574/**
1575 * qla4xxx_eh_device_reset - callback for target reset.
1576 * @cmd: Pointer to Linux's SCSI command structure
1577 *
1578 * This routine is called by the Linux OS to reset all luns on the
1579 * specified target.
1580 **/
1581static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1582{
1583 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
1584 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1585 struct srb *sp;
1586 int ret = FAILED, stat;
1587
1588 sp = (struct srb *) cmd->SCp.ptr;
1589 if (!sp || !ddb_entry)
1590 return ret;
1591
1592 dev_info(&ha->pdev->dev,
1593 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
1594 cmd->device->channel, cmd->device->id, cmd->device->lun);
1595
1596 DEBUG2(printk(KERN_INFO
1597 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
1598 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
1599 cmd, jiffies, cmd->timeout_per_command / HZ,
1600 ha->dpc_flags, cmd->result, cmd->allowed));
1601
1602 /* FIXME: wait for hba to go online */
1603 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
1604 if (stat != QLA_SUCCESS) {
1605 dev_info(&ha->pdev->dev, "DEVICE RESET FAILED. %d\n", stat);
1606 goto eh_dev_reset_done;
1607 }
1608
1609 /* Send marker. */
1610 ha->marker_needed = 1;
1611
1612 /*
1613 * If we are coming down the EH path, wait for all commands to complete
1614 * for the device.
1615 */
1616 if (cmd->device->host->shost_state == SHOST_RECOVERY) {
1617 if (qla4xxx_eh_wait_for_active_target_commands(ha,
1618 cmd->device->id,
1619 cmd->device->lun)){
1620 dev_info(&ha->pdev->dev,
1621 "DEVICE RESET FAILED - waiting for "
1622 "commands.\n");
1623 goto eh_dev_reset_done;
1624 }
1625 }
1626
1627 dev_info(&ha->pdev->dev,
1628 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
1629 ha->host_no, cmd->device->channel, cmd->device->id,
1630 cmd->device->lun);
1631
1632 ret = SUCCESS;
1633
1634eh_dev_reset_done:
1635
1636 return ret;
1637}
1638
1639/**
1640 * qla4xxx_eh_host_reset - kernel callback
1641 * @cmd: Pointer to Linux's SCSI command structure
1642 *
1643 * This routine is invoked by the Linux kernel to perform fatal error
1644 * recovery on the specified adapter.
1645 **/
1646static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1647{
1648 int return_status = FAILED;
1649 struct scsi_qla_host *ha;
1650
1651 ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
1652
1653 dev_info(&ha->pdev->dev,
1654 "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no,
1655 cmd->device->channel, cmd->device->id, cmd->device->lun);
1656
1657 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
1658 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
1659 "DEAD.\n", ha->host_no, cmd->device->channel,
1660 __func__));
1661
1662 return FAILED;
1663 }
1664
1665 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) {
1666 return_status = SUCCESS;
1667 }
1668
1669 dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
 1670		 return_status == FAILED ? "FAILED" : "SUCCEEDED");
1671
1672 return return_status;
1673}
1674
1675
1676static struct pci_device_id qla4xxx_pci_tbl[] = {
1677 {
1678 .vendor = PCI_VENDOR_ID_QLOGIC,
1679 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
1680 .subvendor = PCI_ANY_ID,
1681 .subdevice = PCI_ANY_ID,
1682 },
1683 {
1684 .vendor = PCI_VENDOR_ID_QLOGIC,
1685 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
1686 .subvendor = PCI_ANY_ID,
1687 .subdevice = PCI_ANY_ID,
1688 },
1689 {0, 0},
1690};
1691MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
1692
1693struct pci_driver qla4xxx_pci_driver = {
1694 .name = DRIVER_NAME,
1695 .id_table = qla4xxx_pci_tbl,
1696 .probe = qla4xxx_probe_adapter,
1697 .remove = qla4xxx_remove_adapter,
1698};
1699
1700static int __init qla4xxx_module_init(void)
1701{
1702 int ret;
1703
1704 /* Allocate cache for SRBs. */
1705 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
1706 SLAB_HWCACHE_ALIGN, NULL, NULL);
1707 if (srb_cachep == NULL) {
1708 printk(KERN_ERR
1709 "%s: Unable to allocate SRB cache..."
1710 "Failing load!\n", DRIVER_NAME);
1711 ret = -ENOMEM;
1712 goto no_srp_cache;
1713 }
1714
1715 /* Derive version string. */
1716 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
1717 if (extended_error_logging)
1718 strcat(qla4xxx_version_str, "-debug");
1719
1720 qla4xxx_scsi_transport =
1721 iscsi_register_transport(&qla4xxx_iscsi_transport);
1722 if (!qla4xxx_scsi_transport){
1723 ret = -ENODEV;
1724 goto release_srb_cache;
1725 }
1726
1727 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
1728 ret = pci_register_driver(&qla4xxx_pci_driver);
1729 if (ret)
1730 goto unregister_transport;
1731
1732 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
1733 return 0;
1734unregister_transport:
1735 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
1736release_srb_cache:
1737 kmem_cache_destroy(srb_cachep);
1738no_srp_cache:
1739 return ret;
1740}
1741
1742static void __exit qla4xxx_module_exit(void)
1743{
1744 pci_unregister_driver(&qla4xxx_pci_driver);
1745 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
1746 kmem_cache_destroy(srb_cachep);
1747}
1748
1749module_init(qla4xxx_module_init);
1750module_exit(qla4xxx_module_exit);
1751
1752MODULE_AUTHOR("QLogic Corporation");
1753MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
1754MODULE_LICENSE("GPL");
1755MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
new file mode 100644
index 00000000000..b3fe7e68988
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -0,0 +1,13 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k"
9
10#define QL4_DRIVER_MAJOR_VER 5
11#define QL4_DRIVER_MINOR_VER 0
12#define QL4_DRIVER_PATCH_VER 5
13#define QL4_DRIVER_BETA_VER 9
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 327b33a57b0..86e13183c9b 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -215,18 +215,19 @@ static void raid_component_release(struct class_device *cdev)
 	kfree(rc);
 }
 
-void raid_component_add(struct raid_template *r,struct device *raid_dev,
+int raid_component_add(struct raid_template *r,struct device *raid_dev,
 			struct device *component_dev)
 {
 	struct class_device *cdev =
 		attribute_container_find_class_device(&r->raid_attrs.ac,
 						      raid_dev);
 	struct raid_component *rc;
 	struct raid_data *rd = class_get_devdata(cdev);
+	int err;
 
 	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
 	if (!rc)
-		return;
+		return -ENOMEM;
 
 	INIT_LIST_HEAD(&rc->node);
 	class_device_initialize(&rc->cdev);
@@ -239,7 +240,18 @@ void raid_component_add(struct raid_template *r,struct device *raid_dev,
 	list_add_tail(&rc->node, &rd->component_list);
 	rc->cdev.parent = cdev;
 	rc->cdev.class = &raid_class.class;
-	class_device_add(&rc->cdev);
+	err = class_device_add(&rc->cdev);
+	if (err)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	list_del(&rc->node);
+	rd->component_count--;
+	put_device(component_dev);
+	kfree(rc);
+	return err;
 }
 EXPORT_SYMBOL(raid_component_add);
 
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index da95bce907d..c59f31533ab 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -128,7 +128,7 @@ const char * scsi_device_type(unsigned type)
 		return "Well-known LUN ";
 	if (type == 0x1f)
 		return "No Device ";
-	if (type > ARRAY_SIZE(scsi_device_types))
+	if (type >= ARRAY_SIZE(scsi_device_types))
 		return "Unknown ";
 	return scsi_device_types[type];
 }
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 3d0429bc14a..ce63044b1ec 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -150,6 +150,7 @@ static struct {
150 {"DELL", "PERCRAID", NULL, BLIST_FORCELUN}, 150 {"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
151 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ 151 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
152 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ 152 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
153 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
153 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, 154 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
154 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, 155 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
155 {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 156 {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -161,6 +162,11 @@ static struct {
161 {"HITACHI", "DF600", "*", BLIST_SPARSELUN}, 162 {"HITACHI", "DF600", "*", BLIST_SPARSELUN},
162 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 163 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
163 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 164 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
165 {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
166 {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
167 {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
168 {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
169 {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
164 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ 170 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
165 {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ 171 {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
166 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, 172 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
@@ -168,6 +174,14 @@ static struct {
168 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 174 {"HP", "C1557A", NULL, BLIST_FORCELUN},
169 {"HP", "C3323-300", "4269", BLIST_NOTQ}, 175 {"HP", "C3323-300", "4269", BLIST_NOTQ},
170 {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, 176 {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
177 {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
178 {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
179 {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
180 {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
181 {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
182 {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
183 {"HP", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
184 {"HP", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
171 {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, 185 {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
172 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 186 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
173 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, 187 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
@@ -188,6 +202,7 @@ static struct {
188 {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 202 {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
189 {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 203 {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
190 {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 204 {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
205 {"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
191 {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 206 {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
192 {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 207 {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
193 {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 208 {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
@@ -210,6 +225,7 @@ static struct {
210 {"SUN", "T300", "*", BLIST_SPARSELUN}, 225 {"SUN", "T300", "*", BLIST_SPARSELUN},
211 {"SUN", "T4", "*", BLIST_SPARSELUN}, 226 {"SUN", "T4", "*", BLIST_SPARSELUN},
212 {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN}, 227 {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
228 {"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
213 {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, 229 {"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
214 {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, 230 {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
215 {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, 231 {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 71084728eb4..743f67ed764 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -426,7 +426,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 free_req:
 	blk_put_request(req);
 free_sense:
-	kfree(sioc);
+	kmem_cache_free(scsi_io_context_cache, sioc);
 	return DRIVER_ERROR << 24;
 }
 EXPORT_SYMBOL_GPL(scsi_execute_async);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 10bc99c911f..84ff203ffed 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1794,7 +1794,7 @@ static void sd_shutdown(struct device *dev)
  **/
 static int __init init_sd(void)
 {
-	int majors = 0, i;
+	int majors = 0, i, err;
 
 	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
 
@@ -1805,9 +1805,22 @@ static int __init init_sd(void)
 	if (!majors)
 		return -ENODEV;
 
-	class_register(&sd_disk_class);
+	err = class_register(&sd_disk_class);
+	if (err)
+		goto err_out;
 
-	return scsi_register_driver(&sd_template.gendrv);
+	err = scsi_register_driver(&sd_template.gendrv);
+	if (err)
+		goto err_out_class;
+
+	return 0;
+
+err_out_class:
+	class_unregister(&sd_disk_class);
+err_out:
+	for (i = 0; i < SD_MAJORS; i++)
+		unregister_blkdev(sd_major(i), "sd");
+	return err;
 }
 
 /**
@@ -1822,10 +1835,10 @@ static void __exit exit_sd(void)
 	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
 
 	scsi_unregister_driver(&sd_template.gendrv);
+	class_unregister(&sd_disk_class);
+
 	for (i = 0; i < SD_MAJORS; i++)
 		unregister_blkdev(sd_major(i), "sd");
-
-	class_unregister(&sd_disk_class);
 }
 
 module_init(init_sd);
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 2679ea8bff1..4e6666ceae2 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -94,7 +94,6 @@
 #include <linux/string.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
-#include <linux/delay.h>
 #include <linux/blkdev.h>
 #include <linux/stat.h>
 #include <linux/delay.h>
@@ -103,12 +102,13 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-#include "scsi.h"
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_host.h>
-#include "seagate.h"
 
-#include <scsi/scsi_ioctl.h>
 
 #ifdef DEBUG
 #define DPRINTK( when, msg... ) do { if ( (DEBUG & (when)) == (when) ) printk( msg ); } while (0)
@@ -322,6 +322,7 @@ static Signature __initdata signatures[] = {
 static int hostno = -1;
 static void seagate_reconnect_intr (int, void *, struct pt_regs *);
 static irqreturn_t do_seagate_reconnect_intr (int, void *, struct pt_regs *);
+static int seagate_st0x_bus_reset(struct scsi_cmnd *);
 
 #ifdef FAST
 static int fast = 1;
@@ -585,8 +586,8 @@ static int linked_connected = 0;
 static unsigned char linked_target, linked_lun;
 #endif
 
-static void (*done_fn) (Scsi_Cmnd *) = NULL;
-static Scsi_Cmnd *SCint = NULL;
+static void (*done_fn) (struct scsi_cmnd *) = NULL;
+static struct scsi_cmnd *SCint = NULL;
 
 /*
  * These control whether or not disconnect / reconnect will be attempted,
@@ -633,7 +634,7 @@ static irqreturn_t do_seagate_reconnect_intr(int irq, void *dev_id,
 static void seagate_reconnect_intr (int irq, void *dev_id, struct pt_regs *regs)
 {
 	int temp;
-	Scsi_Cmnd *SCtmp;
+	struct scsi_cmnd *SCtmp;
 
 	DPRINTK (PHASE_RESELECT, "scsi%d : seagate_reconnect_intr() called\n", hostno);
 
@@ -675,10 +676,11 @@ static void seagate_reconnect_intr (int irq, void *dev_id, struct pt_regs *regs)
 
 static int recursion_depth = 0;
 
-static int seagate_st0x_queue_command (Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int seagate_st0x_queue_command(struct scsi_cmnd * SCpnt,
+				      void (*done) (struct scsi_cmnd *))
 {
 	int result, reconnect;
-	Scsi_Cmnd *SCtmp;
+	struct scsi_cmnd *SCtmp;
 
 	DANY ("seagate: que_command");
 	done_fn = done;
@@ -1609,7 +1611,7 @@ connect_loop:
 	return retcode (st0x_aborted);
 }	/* end of internal_command */
 
-static int seagate_st0x_abort (Scsi_Cmnd * SCpnt)
+static int seagate_st0x_abort(struct scsi_cmnd * SCpnt)
 {
 	st0x_aborted = DID_ABORT;
 	return SUCCESS;
@@ -1624,7 +1626,7 @@ static int seagate_st0x_abort (Scsi_Cmnd * SCpnt)
  * May be called with SCpnt = NULL
  */
 
-static int seagate_st0x_bus_reset(Scsi_Cmnd * SCpnt)
+static int seagate_st0x_bus_reset(struct scsi_cmnd * SCpnt)
 {
 	/* No timeouts - this command is going to fail because it was reset. */
 	DANY ("scsi%d: Reseting bus... ", hostno);
diff --git a/drivers/scsi/seagate.h b/drivers/scsi/seagate.h
deleted file mode 100644
index fb5f380fa4b..00000000000
--- a/drivers/scsi/seagate.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * seagate.h Copyright (C) 1992 Drew Eckhardt
- *	low level scsi driver header for ST01/ST02 by
- *	Drew Eckhardt
- *
- *	<drew@colorado.edu>
- */
-
-#ifndef _SEAGATE_H
-#define SEAGATE_H
-
-static int seagate_st0x_detect(struct scsi_host_template *);
-static int seagate_st0x_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-
-static int seagate_st0x_abort(Scsi_Cmnd *);
-static const char *seagate_st0x_info(struct Scsi_Host *);
-static int seagate_st0x_bus_reset(Scsi_Cmnd *);
-
-#endif /* _SEAGATE_H */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 34f9343ed0a..3f8b9318856 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -60,7 +60,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 
 #ifdef CONFIG_SCSI_PROC_FS
 #include <linux/proc_fs.h>
-static char *sg_version_date = "20060818";
+static char *sg_version_date = "20060920";
 
 static int sg_proc_init(void);
 static void sg_proc_cleanup(void);
@@ -94,6 +94,9 @@ int sg_big_buff = SG_DEF_RESERVED_SIZE;
 static int def_reserved_size = -1;	/* picks up init parameter */
 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
 
+static int scatter_elem_sz = SG_SCATTER_SZ;
+static int scatter_elem_sz_prev = SG_SCATTER_SZ;
+
 #define SG_SECTOR_SZ 512
 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
 
@@ -1537,11 +1540,9 @@ sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
 	msleep(10);	/* dirty detach so delay device destruction */
 }
 
-/* Set 'perm' (4th argument) to 0 to disable module_param's definition
- * of sysfs parameters (which module_param doesn't yet support).
- * Sysfs parameters defined explicitly below.
- */
-module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
+module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
+module_param_named(def_reserved_size, def_reserved_size, int,
+		   S_IRUGO | S_IWUSR);
 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
 
 MODULE_AUTHOR("Douglas Gilbert");
@@ -1550,6 +1551,8 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(SG_VERSION_STR);
 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
 
+MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
+		 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
 
@@ -1558,8 +1561,14 @@ init_sg(void)
 {
 	int rc;
 
+	if (scatter_elem_sz < PAGE_SIZE) {
+		scatter_elem_sz = PAGE_SIZE;
+		scatter_elem_sz_prev = scatter_elem_sz;
+	}
 	if (def_reserved_size >= 0)
 		sg_big_buff = def_reserved_size;
+	else
+		def_reserved_size = sg_big_buff;
 
 	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
 				    SG_MAX_DEVS, "sg");
@@ -1842,15 +1851,30 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	if (mx_sc_elems < 0)
 		return mx_sc_elems;	/* most likely -ENOMEM */
 
+	num = scatter_elem_sz;
+	if (unlikely(num != scatter_elem_sz_prev)) {
+		if (num < PAGE_SIZE) {
+			scatter_elem_sz = PAGE_SIZE;
+			scatter_elem_sz_prev = PAGE_SIZE;
+		} else
+			scatter_elem_sz_prev = num;
+	}
 	for (k = 0, sg = schp->buffer, rem_sz = blk_size;
 	     (rem_sz > 0) && (k < mx_sc_elems);
 	     ++k, rem_sz -= ret_sz, ++sg) {
 
-		num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
+		num = (rem_sz > scatter_elem_sz_prev) ?
+		      scatter_elem_sz_prev : rem_sz;
 		p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
 		if (!p)
 			return -ENOMEM;
 
+		if (num == scatter_elem_sz_prev) {
+			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
+				scatter_elem_sz = ret_sz;
+				scatter_elem_sz_prev = ret_sz;
+			}
+		}
 		sg->page = p;
 		sg->length = ret_sz;
 
@@ -2341,6 +2365,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
 	}
 	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
+	if (unlikely(sg_big_buff != def_reserved_size))
+		sg_big_buff = def_reserved_size;
+
 	sg_build_reserve(sfp, sg_big_buff);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
 			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
@@ -2437,16 +2464,16 @@ sg_res_in_use(Sg_fd * sfp)
 	return srp ? 1 : 0;
 }
 
-/* If retSzp==NULL want exact size or fail */
+/* The size fetched (value output via retSzp) set when non-NULL return */
 static struct page *
 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
 {
 	struct page *resp = NULL;
 	gfp_t page_mask;
 	int order, a_size;
-	int resSz = rqSz;
+	int resSz;
 
-	if (rqSz <= 0)
+	if ((rqSz <= 0) || (NULL == retSzp))
 		return resp;
 
 	if (lowDma)
@@ -2456,8 +2483,9 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
 
 	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
 	     order++, a_size <<= 1) ;
+	resSz = a_size;	/* rounded up if necessary */
 	resp = alloc_pages(page_mask, order);
-	while ((!resp) && order && retSzp) {
+	while ((!resp) && order) {
 		--order;
 		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
 		resp = alloc_pages(page_mask, order); /* try half */
@@ -2466,8 +2494,7 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
 	if (resp) {
 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 			memset(page_address(resp), 0, resSz);
-		if (retSzp)
-			*retSzp = resSz;
+		*retSzp = resSz;
 	}
 	return resp;
 }
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7f669b60067..3babdc76b3f 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -195,9 +195,9 @@ static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int);
 static int st_probe(struct device *);
 static int st_remove(struct device *);
 
-static void do_create_driverfs_files(void);
+static int do_create_driverfs_files(void);
 static void do_remove_driverfs_files(void);
-static void do_create_class_files(struct scsi_tape *, int, int);
+static int do_create_class_files(struct scsi_tape *, int, int);
 
 static struct scsi_driver st_template = {
 	.owner			= THIS_MODULE,
@@ -4048,7 +4048,9 @@ static int st_probe(struct device *dev)
 			STm->cdevs[j] = cdev;
 
 		}
-		do_create_class_files(tpnt, dev_num, mode);
+		error = do_create_class_files(tpnt, dev_num, mode);
+		if (error)
+			goto out_free_tape;
 	}
 
 	sdev_printk(KERN_WARNING, SDp,
@@ -4157,32 +4159,45 @@ static void scsi_tape_release(struct kref *kref)
 
 static int __init init_st(void)
 {
+	int err;
+
 	validate_options();
 
-	printk(KERN_INFO
-	       "st: Version %s, fixed bufsize %d, s/g segs %d\n",
+	printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n",
 	       verstr, st_fixed_buffer_size, st_max_sg_segs);
 
 	st_sysfs_class = class_create(THIS_MODULE, "scsi_tape");
 	if (IS_ERR(st_sysfs_class)) {
-		st_sysfs_class = NULL;
 		printk(KERN_ERR "Unable create sysfs class for SCSI tapes\n");
-		return 1;
+		return PTR_ERR(st_sysfs_class);
 	}
 
-	if (!register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
-				    ST_MAX_TAPE_ENTRIES, "st")) {
-		if (scsi_register_driver(&st_template.gendrv) == 0) {
-			do_create_driverfs_files();
-			return 0;
-		}
-		unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
-					 ST_MAX_TAPE_ENTRIES);
+	err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
+				     ST_MAX_TAPE_ENTRIES, "st");
+	if (err) {
+		printk(KERN_ERR "Unable to get major %d for SCSI tapes\n",
+		       SCSI_TAPE_MAJOR);
+		goto err_class;
 	}
-	class_destroy(st_sysfs_class);
 
-	printk(KERN_ERR "Unable to get major %d for SCSI tapes\n", SCSI_TAPE_MAJOR);
-	return 1;
+	err = scsi_register_driver(&st_template.gendrv);
+	if (err)
+		goto err_chrdev;
+
+	err = do_create_driverfs_files();
+	if (err)
+		goto err_scsidrv;
+
+	return 0;
+
+err_scsidrv:
+	scsi_unregister_driver(&st_template.gendrv);
+err_chrdev:
+	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
+				 ST_MAX_TAPE_ENTRIES);
+err_class:
+	class_destroy(st_sysfs_class);
+	return err;
 }
 
 static void __exit exit_st(void)
@@ -4225,14 +4240,33 @@ static ssize_t st_version_show(struct device_driver *ddd, char *buf)
 }
 static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
 
-static void do_create_driverfs_files(void)
+static int do_create_driverfs_files(void)
 {
 	struct device_driver *driverfs = &st_template.gendrv;
+	int err;
+
+	err = driver_create_file(driverfs, &driver_attr_try_direct_io);
+	if (err)
+		return err;
+	err = driver_create_file(driverfs, &driver_attr_fixed_buffer_size);
+	if (err)
+		goto err_try_direct_io;
+	err = driver_create_file(driverfs, &driver_attr_max_sg_segs);
+	if (err)
+		goto err_attr_fixed_buf;
+	err = driver_create_file(driverfs, &driver_attr_version);
+	if (err)
+		goto err_attr_max_sg;
 
-	driver_create_file(driverfs, &driver_attr_try_direct_io);
-	driver_create_file(driverfs, &driver_attr_fixed_buffer_size);
-	driver_create_file(driverfs, &driver_attr_max_sg_segs);
-	driver_create_file(driverfs, &driver_attr_version);
+	return 0;
+
+err_attr_max_sg:
+	driver_remove_file(driverfs, &driver_attr_max_sg_segs);
+err_attr_fixed_buf:
+	driver_remove_file(driverfs, &driver_attr_fixed_buffer_size);
+err_try_direct_io:
+	driver_remove_file(driverfs, &driver_attr_try_direct_io);
+	return err;
 }
 
 static void do_remove_driverfs_files(void)
@@ -4293,15 +4327,12 @@ static ssize_t st_defcompression_show(struct class_device *class_dev, char *buf)
 
 CLASS_DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
 
-static void do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
+static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
 {
 	int i, rew, error;
 	char name[10];
 	struct class_device *st_class_member;
 
-	if (!st_sysfs_class)
-		return;
-
 	for (rew=0; rew < 2; rew++) {
 		/* Make sure that the minor numbers corresponding to the four
 		   first modes always get the same names */
@@ -4316,18 +4347,24 @@ static void do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
 		if (IS_ERR(st_class_member)) {
 			printk(KERN_WARNING "st%d: class_device_create failed\n",
 			       dev_num);
+			error = PTR_ERR(st_class_member);
 			goto out;
 		}
 		class_set_devdata(st_class_member, &STp->modes[mode]);
 
-		class_device_create_file(st_class_member,
-					 &class_device_attr_defined);
-		class_device_create_file(st_class_member,
-					 &class_device_attr_default_blksize);
-		class_device_create_file(st_class_member,
-					 &class_device_attr_default_density);
-		class_device_create_file(st_class_member,
-					 &class_device_attr_default_compression);
+		error = class_device_create_file(st_class_member,
+						 &class_device_attr_defined);
+		if (error) goto out;
+		error = class_device_create_file(st_class_member,
+						 &class_device_attr_default_blksize);
+		if (error) goto out;
+		error = class_device_create_file(st_class_member,
+						 &class_device_attr_default_density);
+		if (error) goto out;
+		error = class_device_create_file(st_class_member,
+						 &class_device_attr_default_compression);
+		if (error) goto out;
+
 		if (mode == 0 && rew == 0) {
 			error = sysfs_create_link(&STp->device->sdev_gendev.kobj,
 						  &st_class_member->kobj,
@@ -4336,11 +4373,15 @@ static void do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
 				printk(KERN_ERR
 				       "st%d: Can't create sysfs link from SCSI device.\n",
 				       dev_num);
+				goto out;
 			}
 		}
 	}
- out:
-	return;
+
+	return 0;
+
+out:
+	return error;
 }
 
 /* The following functions may be useful for a larger audience. */
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3cf3106a29b..a54e6c1026b 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -11,7 +11,7 @@
  * Written By:
  *	Ed Lin <promise_linux@promise.com>
  *
- *	Version: 2.9.0.13
+ *	Version: 3.0.0.1
  *
  */
 
@@ -37,11 +37,11 @@
 #include <scsi/scsi_tcq.h>
 
 #define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "2.9.0.13"
-#define ST_VER_MAJOR 2
-#define ST_VER_MINOR 9
+#define ST_DRIVER_VERSION "3.0.0.1"
+#define ST_VER_MAJOR 3
+#define ST_VER_MINOR 0
 #define ST_OEM 0
-#define ST_BUILD_VER 13
+#define ST_BUILD_VER 1
 
 enum {
 	/* MU register offset */
@@ -120,12 +120,18 @@ enum {
 
 	st_shasta = 0,
 	st_vsc = 1,
+	st_yosemite = 2,
 
 	PASSTHRU_REQ_TYPE = 0x00000001,
 	PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
 	ST_INTERNAL_TIMEOUT = 30,
 
+	ST_TO_CMD = 0,
+	ST_FROM_CMD = 1,
+
 	/* vendor specific commands of Promise */
+	MGT_CMD = 0xd8,
+	SINBAND_MGT_CMD = 0xd9,
 	ARRAY_CMD = 0xe0,
 	CONTROLLER_CMD = 0xe1,
 	DEBUGGING_CMD = 0xe2,
@@ -133,14 +139,48 @@ enum {
 
 	PASSTHRU_GET_ADAPTER = 0x05,
 	PASSTHRU_GET_DRVVER = 0x10,
+
+	CTLR_CONFIG_CMD = 0x03,
+	CTLR_SHUTDOWN = 0x0d,
+
 	CTLR_POWER_STATE_CHANGE = 0x0e,
 	CTLR_POWER_SAVING = 0x01,
 
 	PASSTHRU_SIGNATURE = 0x4e415041,
+	MGT_CMD_SIGNATURE = 0xba,
 
 	INQUIRY_EVPD = 0x01,
 };
 
+/* SCSI inquiry data */
+typedef struct st_inq {
+	u8 DeviceType :5;
+	u8 DeviceTypeQualifier :3;
+	u8 DeviceTypeModifier :7;
+	u8 RemovableMedia :1;
+	u8 Versions;
+	u8 ResponseDataFormat :4;
+	u8 HiSupport :1;
+	u8 NormACA :1;
+	u8 ReservedBit :1;
+	u8 AERC :1;
+	u8 AdditionalLength;
+	u8 Reserved[2];
+	u8 SoftReset :1;
+	u8 CommandQueue :1;
+	u8 Reserved2 :1;
+	u8 LinkedCommands :1;
+	u8 Synchronous :1;
+	u8 Wide16Bit :1;
+	u8 Wide32Bit :1;
+	u8 RelativeAddressing :1;
+	u8 VendorId[8];
+	u8 ProductId[16];
+	u8 ProductRevisionLevel[4];
+	u8 VendorSpecific[20];
+	u8 Reserved3[40];
+} ST_INQ;
+
 struct st_sgitem {
 	u8 ctrl;	/* SG_CF_xxx */
 	u8 reserved[3];
@@ -181,7 +221,7 @@ struct req_msg {
 	u8 task_attr;
 	u8 task_manage;
 	u8 prd_entry;
-	u8 payload_sz;	/* payload size in 4-byte */
+	u8 payload_sz;	/* payload size in 4-byte, not used */
 	u8 cdb[STEX_CDB_LENGTH];
 	u8 variable[REQ_VARIABLE_LEN];
 };
@@ -242,7 +282,8 @@ struct st_drvver {
 #define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg))
 #define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg))
 #define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
-#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + sizeof(struct st_frame))
+#define STEX_EXTRA_SIZE max(sizeof(struct st_frame), sizeof(ST_INQ))
+#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + STEX_EXTRA_SIZE)
 
 struct st_ccb {
 	struct req_msg *req;
@@ -403,7 +444,7 @@ static int stex_map_sg(struct st_hba *hba,
 }
 
 static void stex_internal_copy(struct scsi_cmnd *cmd,
-	const void *src, size_t *count, int sg_count)
+	const void *src, size_t *count, int sg_count, int direction)
 {
 	size_t lcount;
 	size_t len;
@@ -427,7 +468,10 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
 	} else
 		d = cmd->request_buffer;
 
-	memcpy(d, s, len);
+	if (direction == ST_TO_CMD)
+		memcpy(d, s, len);
+	else
+		memcpy(s, d, len);
 
 	lcount -= len;
 	if (cmd->use_sg)
@@ -449,7 +493,7 @@ static int stex_direct_copy(struct scsi_cmnd *cmd,
 		return 0;
 	}
 
-	stex_internal_copy(cmd, src, &cp_len, n_elem);
+	stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
 
 	if (cmd->use_sg)
 		pci_unmap_sg(hba->pdev, cmd->request_buffer,
@@ -480,7 +524,7 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
 	p->subid =
 		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
 
-	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count);
+	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD);
 }
 
 static void
@@ -489,7 +533,6 @@ stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
 	req->tag = cpu_to_le16(tag);
 	req->task_attr = TASK_ATTRIBUTE_SIMPLE;
 	req->task_manage = 0; /* not supported yet */
-	req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32));
 
 	hba->ccb[tag].req = req;
 	hba->out_req_cnt++;
@@ -595,8 +638,14 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
 	req = stex_alloc_req(hba);
-	req->lun = lun;
-	req->target = id;
+
+	if (hba->cardtype == st_yosemite) {
+		req->lun = lun * (ST_MAX_TARGET_NUM - 1) + id;
+		req->target = 0;
+	} else {
+		req->lun = lun;
+		req->target = id;
+	}
 
 	/* cdb */
 	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
@@ -680,7 +729,51 @@ static void stex_copy_data(struct st_ccb *ccb,
 
 	if (ccb->cmd == NULL)
 		return;
-	stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count);
+	stex_internal_copy(ccb->cmd,
+		resp->variable, &count, ccb->sg_count, ST_TO_CMD);
+}
+
+static void stex_ys_commands(struct st_hba *hba,
+		struct st_ccb *ccb, struct status_msg *resp)
+{
+	size_t count;
+
+	if (ccb->cmd->cmnd[0] == MGT_CMD &&
+		resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
+		ccb->cmd->request_bufflen =
+			le32_to_cpu(*(__le32 *)&resp->variable[0]);
+		return;
+	}
+
+	if (resp->srb_status != 0)
+		return;
+
+	/* determine inquiry command status by DeviceTypeQualifier */
+	if (ccb->cmd->cmnd[0] == INQUIRY &&
+		resp->scsi_status == SAM_STAT_GOOD) {
+		ST_INQ *inq_data;
+
+		count = STEX_EXTRA_SIZE;
+		stex_internal_copy(ccb->cmd, hba->copy_buffer,
+			&count, ccb->sg_count, ST_FROM_CMD);
+		inq_data = (ST_INQ *)hba->copy_buffer;
+		if (inq_data->DeviceTypeQualifier != 0)
+			ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;
+		else
+			ccb->srb_status = SRB_STATUS_SUCCESS;
+	} else if (ccb->cmd->cmnd[0] == REPORT_LUNS) {
+		u8 *report_lun_data = (u8 *)hba->copy_buffer;
+
+		count = STEX_EXTRA_SIZE;
+		stex_internal_copy(ccb->cmd, report_lun_data,
+			&count, ccb->sg_count, ST_FROM_CMD);
+		if (report_lun_data[2] || report_lun_data[3]) {
+			report_lun_data[2] = 0x00;
+			report_lun_data[3] = 0x08;
+			stex_internal_copy(ccb->cmd, report_lun_data,
+				&count, ccb->sg_count, ST_TO_CMD);
+		}
+	}
 }
 
 static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
@@ -702,8 +795,17 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 		return;
 	}
 
-	if (unlikely(hba->mu_status != MU_STATE_STARTED ||
-			hba->out_req_cnt <= 0)) {
+	/*
+	 * it's not a valid status payload if:
+	 * 1. there are no pending requests(e.g. during init stage)
+	 * 2. there are some pending requests, but the controller is in
+	 *     reset status, and its type is not st_yosemite
+	 * firmware of st_yosemite in reset status will return pending requests
+	 * to driver, so we allow it to pass
+	 */
+	if (unlikely(hba->out_req_cnt <= 0 ||
+			(hba->mu_status == MU_STATE_RESETTING &&
+			hba->cardtype != st_yosemite))) {
 		hba->status_tail = hba->status_head;
 		goto update_status;
 	}
@@ -723,6 +825,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 		if (unlikely(ccb->req == NULL)) {
 			printk(KERN_WARNING DRV_NAME
 				"(%s): lagging req\n", pci_name(hba->pdev));
+			hba->out_req_cnt--;
 			continue;
 		}
 
@@ -741,9 +844,13 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 		ccb->scsi_status = resp->scsi_status;
 
 		if (likely(ccb->cmd != NULL)) {
+			if (hba->cardtype == st_yosemite)
+				stex_ys_commands(hba, ccb, resp);
+
 			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
 				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
 				stex_controller_info(hba, ccb);
+
 			stex_unmap_sg(hba, ccb->cmd);
 			stex_scsi_done(ccb);
 			hba->out_req_cnt--;
@@ -948,6 +1055,7 @@ static int stex_reset(struct scsi_cmnd *cmd)
 {
 	struct st_hba *hba;
 	unsigned long flags;
+	unsigned long before;
 	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
 
 	hba->mu_status = MU_STATE_RESETTING;
@@ -955,20 +1063,37 @@ static int stex_reset(struct scsi_cmnd *cmd)
 	if (hba->cardtype == st_shasta)
 		stex_hard_reset(hba);
 
-	if (stex_handshake(hba)) {
-		printk(KERN_WARNING DRV_NAME
-			"(%s): resetting: handshake failed\n",
-			pci_name(hba->pdev));
-		return FAILED;
+	if (hba->cardtype != st_yosemite) {
+		if (stex_handshake(hba)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): resetting: handshake failed\n",
+				pci_name(hba->pdev));
+			return FAILED;
+		}
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->req_head = 0;
+		hba->req_tail = 0;
+		hba->status_head = 0;
+		hba->status_tail = 0;
+		hba->out_req_cnt = 0;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return SUCCESS;
+	}
+
+	/* st_yosemite */
+	writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL);
+	readl(hba->mmio_base + IDBL); /* flush */
+	before = jiffies;
+	while (hba->out_req_cnt > 0) {
+		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): reset timeout\n", pci_name(hba->pdev));
+			return FAILED;
+		}
+		msleep(1);
 	}
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->req_head = 0;
-	hba->req_tail = 0;
-	hba->status_head = 0;
-	hba->status_tail = 0;
-	hba->out_req_cnt = 0;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	hba->mu_status = MU_STATE_STARTED;
 	return SUCCESS;
 }
 
@@ -1156,9 +1281,16 @@ static void stex_hba_stop(struct st_hba *hba)
 	req = stex_alloc_req(hba);
 	memset(req->cdb, 0, STEX_CDB_LENGTH);
 
-	req->cdb[0] = CONTROLLER_CMD;
-	req->cdb[1] = CTLR_POWER_STATE_CHANGE;
-	req->cdb[2] = CTLR_POWER_SAVING;
+	if (hba->cardtype == st_yosemite) {
+		req->cdb[0] = MGT_CMD;
+		req->cdb[1] = MGT_CMD_SIGNATURE;
+		req->cdb[2] = CTLR_CONFIG_CMD;
+		req->cdb[3] = CTLR_SHUTDOWN;
+	} else {
+		req->cdb[0] = CONTROLLER_CMD;
+		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
+		req->cdb[2] = CTLR_POWER_SAVING;
+	}
 
 	hba->ccb[tag].cmd = NULL;
 	hba->ccb[tag].sg_count = 0;
@@ -1222,6 +1354,7 @@ static struct pci_device_id stex_pci_tbl[] = {
 	{ 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
 	{ 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
 	{ 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
+	{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite },
 	{ }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
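For the new st_yosemite boards, the queuecommand change above folds the SCSI (target id, LUN) pair into a single flat LUN, lun * (ST_MAX_TARGET_NUM - 1) + id, and always addresses target 0. A tiny arithmetic sketch of that mapping; the value assigned to ST_MAX_TARGET_NUM below is assumed purely for illustration:

#include <stdio.h>

#define ST_MAX_TARGET_NUM 9	/* assumed value, for illustration only */

/* Flatten (id, lun) the way the st_yosemite branch above does. */
static unsigned int flat_lun(unsigned int id, unsigned int lun)
{
	return lun * (ST_MAX_TARGET_NUM - 1) + id;
}

int main(void)
{
	/* Each LUN gets a block of (ST_MAX_TARGET_NUM - 1) consecutive slots. */
	printf("id=0 lun=0 -> %u\n", flat_lun(0, 0));	/* 0 */
	printf("id=7 lun=0 -> %u\n", flat_lun(7, 0));	/* 7 */
	printf("id=0 lun=1 -> %u\n", flat_lun(0, 1));	/* 8 */
	return 0;
}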
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 9404ff3d4c7..028d5f641cc 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -279,6 +279,10 @@ static void dc390_ResetDevParam(struct dc390_acb* pACB);
 static u32 dc390_laststatus = 0;
 static u8 dc390_adapterCnt = 0;
 
+static int disable_clustering;
+module_param(disable_clustering, int, S_IRUGO);
+MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1");
+
 /* Startup values, to be overriden on the commandline */
 static int tmscsim[] = {-2, -2, -2, -2, -2, -2};
 
@@ -2299,7 +2303,7 @@ static struct scsi_host_template driver_template = {
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 /***********************************************************************
@@ -2525,6 +2529,8 @@ static int __devinit dc390_probe_one(struct pci_dev *pdev,
 	pci_set_master(pdev);
 
 	error = -ENOMEM;
+	if (disable_clustering)
+		driver_template.use_clustering = DISABLE_CLUSTERING;
 	shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb));
 	if (!shost)
 		goto out_disable_device;
@@ -2660,6 +2666,10 @@ static struct pci_driver dc390_driver = {
 
 static int __init dc390_module_init(void)
 {
+	if (!disable_clustering)
+		printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"
+			"\twith \"disable_clustering=1\" and report to maintainers\n");
+
 	if (tmscsim[0] == -1 || tmscsim[0] > 15) {
 		tmscsim[0] = 7;
 		tmscsim[1] = 4;