Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/Kconfig	1
-rw-r--r--	drivers/scsi/Makefile	1
-rw-r--r--	drivers/scsi/qla4xxx/Kconfig	7
-rw-r--r--	drivers/scsi/qla4xxx/Makefile	5
-rw-r--r--	drivers/scsi/qla4xxx/ql4_dbg.c	197
-rw-r--r--	drivers/scsi/qla4xxx/ql4_dbg.h	55
-rw-r--r--	drivers/scsi/qla4xxx/ql4_def.h	586
-rw-r--r--	drivers/scsi/qla4xxx/ql4_fw.h	843
-rw-r--r--	drivers/scsi/qla4xxx/ql4_glbl.h	78
-rw-r--r--	drivers/scsi/qla4xxx/ql4_init.c	1340
-rw-r--r--	drivers/scsi/qla4xxx/ql4_inline.h	84
-rw-r--r--	drivers/scsi/qla4xxx/ql4_iocb.c	368
-rw-r--r--	drivers/scsi/qla4xxx/ql4_isr.c	797
-rw-r--r--	drivers/scsi/qla4xxx/ql4_mbx.c	930
-rw-r--r--	drivers/scsi/qla4xxx/ql4_nvram.c	224
-rw-r--r--	drivers/scsi/qla4xxx/ql4_nvram.h	256
-rw-r--r--	drivers/scsi/qla4xxx/ql4_os.c	1755
-rw-r--r--	drivers/scsi/qla4xxx/ql4_version.h	13
18 files changed, 7540 insertions, 0 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8a22a71280a..3ff5ec8f0d3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1244,6 +1244,7 @@ config SCSI_QLOGICPTI
 	  module will be called qlogicpti.
 
 source "drivers/scsi/qla2xxx/Kconfig"
+source "drivers/scsi/qla4xxx/Kconfig"
 
 config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1ef951be7a5..bcca39c3bcb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
 obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
 obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI)	+= qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_SEAGATE)	+= seagate.o
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
new file mode 100644
index 00000000000..08a07f0b8d9
--- /dev/null
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX host adapter family support"
3 depends on PCI && SCSI
4 select SCSI_ISCSI_ATTRS
5 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) iSCSI host
7 adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
new file mode 100644
index 00000000000..86ea37baa0f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -0,0 +1,5 @@
1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
2 ql4_nvram.o ql4_dbg.o
3
4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
5
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
new file mode 100644
index 00000000000..752031fadfe
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -0,0 +1,197 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9#include <scsi/scsi_dbg.h>
10
11static void qla4xxx_print_srb_info(struct srb * srb)
12{
13 printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
14 printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
15 __func__, srb->cmd, (unsigned long) srb->dma_handle);
16 printk("%s: fw_ddb_index = %d, lun = %d\n",
17 __func__, srb->fw_ddb_index, srb->cmd->device->lun);
18 printk("%s: iocb_tov = %d\n",
19 __func__, srb->iocb_tov);
20 printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
21 __func__, srb->cc_stat, srb->r_start, srb->u_start);
22}
23
24void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
25{
26 printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
27 printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
28 cmd->device->channel, cmd->device->id, cmd->device->lun,
29 cmd->cmd_len);
30 scsi_print_command(cmd);
31 printk(" seg_cnt = %d\n", cmd->use_sg);
32 printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
33 cmd->request_buffer, cmd->request_bufflen);
34 if (cmd->use_sg) {
35 struct scatterlist *sg;
36 sg = (struct scatterlist *)cmd->request_buffer;
37 printk(" SG buffer: \n");
38 qla4xxx_dump_buffer((caddr_t) sg,
39 (cmd->use_sg * sizeof(*sg)));
40 }
41 printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
42 cmd->transfersize);
43 printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
44 printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
45 cmd->sc_data_direction);
46 printk(" Current time (jiffies) = 0x%lx, "
47 "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
48 qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
49}
50
51void __dump_registers(struct scsi_qla_host *ha)
52{
53 uint8_t i;
54 for (i = 0; i < MBOX_REG_COUNT; i++) {
55 printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
56 (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
57 readw(&ha->reg->mailbox[i]));
58 }
59 printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
60 (uint8_t) offsetof(struct isp_reg, flash_address),
61 readw(&ha->reg->flash_address));
62 printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
63 (uint8_t) offsetof(struct isp_reg, flash_data),
64 readw(&ha->reg->flash_data));
65 printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
66 (uint8_t) offsetof(struct isp_reg, ctrl_status),
67 readw(&ha->reg->ctrl_status));
68 if (is_qla4010(ha)) {
69 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
70 (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
71 readw(&ha->reg->u1.isp4010.nvram));
72 }
73
74 else if (is_qla4022(ha)) {
75 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
76 (uint8_t) offsetof(struct isp_reg,
77 u1.isp4022.intr_mask),
78 readw(&ha->reg->u1.isp4022.intr_mask));
79 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
80 (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
81 readw(&ha->reg->u1.isp4022.nvram));
82 printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
83 (uint8_t) offsetof(struct isp_reg,
84 u1.isp4022.semaphore),
85 readw(&ha->reg->u1.isp4022.semaphore));
86 }
87 printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
88 (uint8_t) offsetof(struct isp_reg, req_q_in),
89 readw(&ha->reg->req_q_in));
90 printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
91 (uint8_t) offsetof(struct isp_reg, rsp_q_out),
92 readw(&ha->reg->rsp_q_out));
93 if (is_qla4010(ha)) {
94 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
95 (uint8_t) offsetof(struct isp_reg,
96 u2.isp4010.ext_hw_conf),
97 readw(&ha->reg->u2.isp4010.ext_hw_conf));
98 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
99 (uint8_t) offsetof(struct isp_reg,
100 u2.isp4010.port_ctrl),
101 readw(&ha->reg->u2.isp4010.port_ctrl));
102 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
103 (uint8_t) offsetof(struct isp_reg,
104 u2.isp4010.port_status),
105 readw(&ha->reg->u2.isp4010.port_status));
106 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
107 (uint8_t) offsetof(struct isp_reg,
108 u2.isp4010.req_q_out),
109 readw(&ha->reg->u2.isp4010.req_q_out));
110 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
111 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
112 readw(&ha->reg->u2.isp4010.gp_out));
113 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
114 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
115 readw(&ha->reg->u2.isp4010.gp_in));
116 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
117 (uint8_t) offsetof(struct isp_reg,
118 u2.isp4010.port_err_status),
119 readw(&ha->reg->u2.isp4010.port_err_status));
120 }
121
122 else if (is_qla4022(ha)) {
123 printk(KERN_INFO "Page 0 Registers:\n");
124 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
125 (uint8_t) offsetof(struct isp_reg,
126 u2.isp4022.p0.ext_hw_conf),
127 readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
128 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
129 (uint8_t) offsetof(struct isp_reg,
130 u2.isp4022.p0.port_ctrl),
131 readw(&ha->reg->u2.isp4022.p0.port_ctrl));
132 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
133 (uint8_t) offsetof(struct isp_reg,
134 u2.isp4022.p0.port_status),
135 readw(&ha->reg->u2.isp4022.p0.port_status));
136 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
137 (uint8_t) offsetof(struct isp_reg,
138 u2.isp4022.p0.gp_out),
139 readw(&ha->reg->u2.isp4022.p0.gp_out));
140 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
141 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
142 readw(&ha->reg->u2.isp4022.p0.gp_in));
143 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
144 (uint8_t) offsetof(struct isp_reg,
145 u2.isp4022.p0.port_err_status),
146 readw(&ha->reg->u2.isp4022.p0.port_err_status));
147 printk(KERN_INFO "Page 1 Registers:\n");
148 writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
149 &ha->reg->ctrl_status);
150 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
151 (uint8_t) offsetof(struct isp_reg,
152 u2.isp4022.p1.req_q_out),
153 readw(&ha->reg->u2.isp4022.p1.req_q_out));
154 writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
155 &ha->reg->ctrl_status);
156 }
157}
158
159void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
160{
161 unsigned long flags = 0;
162 int i = 0;
163 spin_lock_irqsave(&ha->hardware_lock, flags);
164 for (i = 1; i < MBOX_REG_COUNT; i++)
165 printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
166 readw(&ha->reg->mailbox[i]));
167 spin_unlock_irqrestore(&ha->hardware_lock, flags);
168}
169
170void qla4xxx_dump_registers(struct scsi_qla_host *ha)
171{
172 unsigned long flags = 0;
173 spin_lock_irqsave(&ha->hardware_lock, flags);
174 __dump_registers(ha);
175 spin_unlock_irqrestore(&ha->hardware_lock, flags);
176}
177
178void qla4xxx_dump_buffer(void *b, uint32_t size)
179{
180 uint32_t cnt;
181 uint8_t *c = b;
182
183 printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
184 "Fh\n");
185 printk("------------------------------------------------------------"
186 "--\n");
187 for (cnt = 0; cnt < size; cnt++, c++) {
188 printk(KERN_DEBUG "%02x", *c);
189 if (!(cnt % 16))
190 printk(KERN_DEBUG "\n");
191
192 else
193 printk(KERN_DEBUG " ");
194 }
195 if (cnt % 16)
196 printk(KERN_DEBUG "\n");
197}
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
new file mode 100644
index 00000000000..56ddc227f84
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -0,0 +1,55 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8/*
9 * Driver debug definitions.
10 */
11/* #define QL_DEBUG */ /* DEBUG messages */
12/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
13/* #define QL_DEBUG_LEVEL_4 */
14/* #define QL_DEBUG_LEVEL_5 */
15/* #define QL_DEBUG_LEVEL_9 */
16
17#define QL_DEBUG_LEVEL_2 /* Always enable error messages */
18#if defined(QL_DEBUG)
19#define DEBUG(x) do {x;} while (0);
20#else
21#define DEBUG(x) do {} while (0);
22#endif
23
24#if defined(QL_DEBUG_LEVEL_2)
25#define DEBUG2(x) do {if(extended_error_logging == 2) x;} while (0);
26#define DEBUG2_3(x) do {x;} while (0);
27#else /* */
28#define DEBUG2(x) do {} while (0);
29#endif /* */
30
31#if defined(QL_DEBUG_LEVEL_3)
32#define DEBUG3(x) do {if(extended_error_logging == 3) x;} while (0);
33#else /* */
34#define DEBUG3(x) do {} while (0);
35#if !defined(QL_DEBUG_LEVEL_2)
36#define DEBUG2_3(x) do {} while (0);
37#endif /* */
38#endif /* */
39#if defined(QL_DEBUG_LEVEL_4)
40#define DEBUG4(x) do {x;} while (0);
41#else /* */
42#define DEBUG4(x) do {} while (0);
43#endif /* */
44
45#if defined(QL_DEBUG_LEVEL_5)
46#define DEBUG5(x) do {x;} while (0);
47#else /* */
48#define DEBUG5(x) do {} while (0);
49#endif /* */
50
51#if defined(QL_DEBUG_LEVEL_9)
52#define DEBUG9(x) do {x;} while (0);
53#else /* */
54#define DEBUG9(x) do {} while (0);
55#endif /* */
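/*
 * Usage sketch (illustrative only): with QL_DEBUG_LEVEL_2 compiled in,
 * a DEBUG2() trace reaches the log only when the extended_error_logging
 * flag (declared extern in ql4_glbl.h) is set to 2, e.g.
 *
 *	DEBUG2(printk(KERN_INFO "scsi%ld: %s: mailbox command timed out\n",
 *		      ha->host_no, __func__));
 */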
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
new file mode 100644
index 00000000000..a7f6c7b1c59
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -0,0 +1,586 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef __QL4_DEF_H
9#define __QL4_DEF_H
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/types.h>
14#include <linux/module.h>
15#include <linux/list.h>
16#include <linux/pci.h>
17#include <linux/dma-mapping.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/dmapool.h>
21#include <linux/mempool.h>
22#include <linux/spinlock.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/mutex.h>
27
28#include <net/tcp.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_transport.h>
34#include <scsi/scsi_transport_iscsi.h>
35
36
37#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
38#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
39#endif
40
41#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
42#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
43#endif /* */
44
45#define QLA_SUCCESS 0
46#define QLA_ERROR 1
47
48/*
49 * Data bit definitions
50 */
51#define BIT_0 0x1
52#define BIT_1 0x2
53#define BIT_2 0x4
54#define BIT_3 0x8
55#define BIT_4 0x10
56#define BIT_5 0x20
57#define BIT_6 0x40
58#define BIT_7 0x80
59#define BIT_8 0x100
60#define BIT_9 0x200
61#define BIT_10 0x400
62#define BIT_11 0x800
63#define BIT_12 0x1000
64#define BIT_13 0x2000
65#define BIT_14 0x4000
66#define BIT_15 0x8000
67#define BIT_16 0x10000
68#define BIT_17 0x20000
69#define BIT_18 0x40000
70#define BIT_19 0x80000
71#define BIT_20 0x100000
72#define BIT_21 0x200000
73#define BIT_22 0x400000
74#define BIT_23 0x800000
75#define BIT_24 0x1000000
76#define BIT_25 0x2000000
77#define BIT_26 0x4000000
78#define BIT_27 0x8000000
79#define BIT_28 0x10000000
80#define BIT_29 0x20000000
81#define BIT_30 0x40000000
82#define BIT_31 0x80000000
83
84/*
85 * Host adapter default definitions
86 ***********************************/
87#define MAX_HBAS 16
88#define MAX_BUSES 1
89#define MAX_TARGETS (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
90#define MAX_LUNS 0xffff
91#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
92#define MAX_DDB_ENTRIES (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
93#define MAX_PDU_ENTRIES 32
94#define INVALID_ENTRY 0xFFFF
95#define MAX_CMDS_TO_RISC 1024
96#define MAX_SRBS MAX_CMDS_TO_RISC
97#define MBOX_AEN_REG_COUNT 5
98#define MAX_INIT_RETRIES 5
99#define IOCB_HIWAT_CUSHION 16
100
101/*
102 * Buffer sizes
103 */
104#define REQUEST_QUEUE_DEPTH MAX_CMDS_TO_RISC
105#define RESPONSE_QUEUE_DEPTH 64
106#define QUEUE_SIZE 64
107#define DMA_BUFFER_SIZE 512
108
109/*
110 * Misc
111 */
112#define MAC_ADDR_LEN 6 /* in bytes */
113#define IP_ADDR_LEN 4 /* in bytes */
114#define DRIVER_NAME "qla4xxx"
115
116#define MAX_LINKED_CMDS_PER_LUN 3
117#define MAX_REQS_SERVICED_PER_INTR 16
118
119#define ISCSI_IPADDR_SIZE 4 /* IP address size */
120#define ISCSI_ALIAS_SIZE 32 /* iSCSI alias name size */
121#define ISCSI_NAME_SIZE 255 /* ISCSI Name size -
122 * usually a string */
123
124#define LSDW(x) ((u32)((u64)(x)))
125#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
126
127/*
128 * Retry & Timeout Values
129 */
130#define MBOX_TOV 60
131#define SOFT_RESET_TOV 30
132#define RESET_INTR_TOV 3
133#define SEMAPHORE_TOV 10
134#define ADAPTER_INIT_TOV 120
135#define ADAPTER_RESET_TOV 180
136#define EXTEND_CMD_TOV 60
137#define WAIT_CMD_TOV 30
138#define EH_WAIT_CMD_TOV 120
139#define FIRMWARE_UP_TOV 60
140#define RESET_FIRMWARE_TOV 30
141#define LOGOUT_TOV 10
142#define IOCB_TOV_MARGIN 10
143#define RELOGIN_TOV 18
144#define ISNS_DEREG_TOV 5
145
146#define MAX_RESET_HA_RETRIES 2
147
148/*
149 * SCSI Request Block structure (srb) that is placed
150 * on cmd->SCp location of every I/O [We have 22 bytes available]
151 */
152struct srb {
153 struct list_head list; /* (8) */
154 struct scsi_qla_host *ha; /* HA the SP is queued on */
155 struct ddb_entry *ddb;
156 uint16_t flags; /* (1) Status flags. */
157
158#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */
159#define SRB_GOT_SENSE BIT_4 /* sense data received. */
160 uint8_t state; /* (1) Status flags. */
161
162#define SRB_NO_QUEUE_STATE 0 /* Request is in between states */
163#define SRB_FREE_STATE 1
164#define SRB_ACTIVE_STATE 3
165#define SRB_ACTIVE_TIMEOUT_STATE 4
166#define SRB_SUSPENDED_STATE 7 /* Request in suspended state */
167
168 struct scsi_cmnd *cmd; /* (4) SCSI command block */
169 dma_addr_t dma_handle; /* (4) for unmap of single transfers */
170 atomic_t ref_count; /* reference count for this srb */
171 uint32_t fw_ddb_index;
172 uint8_t err_id; /* error id */
173#define SRB_ERR_PORT 1 /* Request failed because "port down" */
174#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */
175#define SRB_ERR_DEVICE 3 /* Request failed because "device error" */
176#define SRB_ERR_OTHER 4
177
178 uint16_t reserved;
179 uint16_t iocb_tov;
180 uint16_t iocb_cnt; /* Number of used iocbs */
181 uint16_t cc_stat;
182 u_long r_start; /* Time we receive a cmd from OS */
183 u_long u_start; /* Time when we handed the cmd to F/W */
184};
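/*
 * Illustrative helpers (example only; the names are assumptions): the
 * srb rides in the command's scsi_pointer area, which is how
 * qla4xxx_print_scsi_cmd() recovers it via cmd->SCp.ptr.
 */
static inline void srb_to_cmd_example(struct scsi_cmnd *cmd, struct srb *srb)
{
	cmd->SCp.ptr = (char *) srb;	/* stash at queuecommand time */
}

static inline struct srb *cmd_to_srb_example(struct scsi_cmnd *cmd)
{
	return (struct srb *) cmd->SCp.ptr;	/* recover at completion */
}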
185
186 /*
187 * Device Database (DDB) structure
188 */
189struct ddb_entry {
190 struct list_head list; /* ddb list */
191 struct scsi_qla_host *ha;
192 struct iscsi_cls_session *sess;
193 struct iscsi_cls_conn *conn;
194
195 atomic_t state; /* DDB State */
196
197 unsigned long flags; /* DDB Flags */
198
199 unsigned long dev_scan_wait_to_start_relogin;
200 unsigned long dev_scan_wait_to_complete_relogin;
201
202 uint16_t os_target_id; /* Target ID */
203 uint16_t fw_ddb_index; /* DDB firmware index */
204 uint8_t reserved[2];
205 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
206
207 uint32_t CmdSn;
208 uint16_t target_session_id;
209 uint16_t connection_id;
210 uint16_t exe_throttle; /* Max number of cmds outstanding
211 * simultaneously */
212 uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
213 * complete */
214 uint16_t default_relogin_timeout; /* Max time to wait for
215 * relogin to complete */
216 uint16_t tcp_source_port_num;
217 uint32_t default_time2wait; /* Default Min time between
218 * relogins (+aens) */
219
220 atomic_t port_down_timer; /* Device connection timer */
221 atomic_t retry_relogin_timer; /* Min Time between relogins
222 * (4000 only) */
223 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
224 atomic_t relogin_retry_count; /* Num of times relogin has been
225 * retried */
226
227 uint16_t port;
228 uint32_t tpgt;
229 uint8_t ip_addr[ISCSI_IPADDR_SIZE];
230 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
231 uint8_t iscsi_alias[0x20];
232};
233
234/*
235 * DDB states.
236 */
237#define DDB_STATE_DEAD 0 /* We can no longer talk to
238 * this device */
239#define DDB_STATE_ONLINE 1 /* Device ready to accept
240 * commands */
241#define DDB_STATE_MISSING 2 /* Device logged off, trying
242 * to re-login */
243
244/*
245 * DDB flags.
246 */
247#define DF_RELOGIN 0 /* Relogin to device */
248#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL
249 * logged it out */
250#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
251#define DF_FO_MASKED 3
252
253/*
254 * Asynchronous Event Queue structure
255 */
256struct aen {
257 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
258};
259
260
261#include "ql4_fw.h"
262#include "ql4_nvram.h"
263
264/*
265 * Linux Host Adapter structure
266 */
267struct scsi_qla_host {
268 /* Linux adapter configuration data */
269 struct Scsi_Host *host; /* pointer to host data */
270 uint32_t tot_ddbs;
271 unsigned long flags;
272
273#define AF_ONLINE 0 /* 0x00000001 */
274#define AF_INIT_DONE 1 /* 0x00000002 */
275#define AF_MBOX_COMMAND 2 /* 0x00000004 */
276#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
277#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
278#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
279#define AF_LINK_UP 8 /* 0x00000100 */
280#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
281#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
282#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
283#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
284
285 unsigned long dpc_flags;
286
287#define DPC_RESET_HA 1 /* 0x00000002 */
288#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
289#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
290#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
291#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
292#define DPC_ISNS_RESTART 7 /* 0x00000080 */
293#define DPC_AEN 9 /* 0x00000200 */
294#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
295
296 uint16_t iocb_cnt;
297 uint16_t iocb_hiwat;
298
299 /* SRB cache. */
300#define SRB_MIN_REQ 128
301 mempool_t *srb_mempool;
302
303 /* pci information */
304 struct pci_dev *pdev;
305
306 struct isp_reg __iomem *reg; /* Base I/O address */
307 unsigned long pio_address;
308 unsigned long pio_length;
309#define MIN_IOBASE_LEN 0x100
310
311 uint16_t req_q_count;
312 uint8_t marker_needed;
313 uint8_t rsvd1;
314
315 unsigned long host_no;
316
317 /* NVRAM registers */
318 struct eeprom_data *nvram;
319 spinlock_t hardware_lock ____cacheline_aligned;
320 spinlock_t list_lock;
321 uint32_t eeprom_cmd_data;
322
323 /* Counters for general statistics */
324 uint64_t adapter_error_count;
325 uint64_t device_error_count;
326 uint64_t total_io_count;
327 uint64_t total_mbytes_xferred;
328 uint64_t link_failure_count;
329 uint64_t invalid_crc_count;
330 uint32_t spurious_int_count;
331 uint32_t aborted_io_count;
332 uint32_t io_timeout_count;
333 uint32_t mailbox_timeout_count;
334 uint32_t seconds_since_last_intr;
335 uint32_t seconds_since_last_heartbeat;
336 uint32_t mac_index;
337
338 /* Info Needed for Management App */
339 /* --- From GetFwVersion --- */
340 uint32_t firmware_version[2];
341 uint32_t patch_number;
342 uint32_t build_number;
343
344 /* --- From Init_FW --- */
345 /* init_cb_t *init_cb; */
346 uint16_t firmware_options;
347 uint16_t tcp_options;
348 uint8_t ip_address[IP_ADDR_LEN];
349 uint8_t subnet_mask[IP_ADDR_LEN];
350 uint8_t gateway[IP_ADDR_LEN];
351 uint8_t alias[32];
352 uint8_t name_string[256];
353 uint8_t heartbeat_interval;
354 uint8_t rsvd;
355
356 /* --- From FlashSysInfo --- */
357 uint8_t my_mac[MAC_ADDR_LEN];
358 uint8_t serial_number[16];
359
360 /* --- From GetFwState --- */
361 uint32_t firmware_state;
362 uint32_t board_id;
363 uint32_t addl_fw_state;
364
365 /* Linux kernel thread */
366 struct workqueue_struct *dpc_thread;
367 struct work_struct dpc_work;
368
369 /* Linux timer thread */
370 struct timer_list timer;
371 uint32_t timer_active;
372
373 /* Recovery Timers */
374 uint32_t port_down_retry_count;
375 uint32_t discovery_wait;
376 atomic_t check_relogin_timeouts;
377 uint32_t retry_reset_ha_cnt;
378 uint32_t isp_reset_timer; /* reset test timer */
379 uint32_t nic_reset_timer; /* simulated nic reset test timer */
380 int eh_start;
381 struct list_head free_srb_q;
382 uint16_t free_srb_q_count;
383 uint16_t num_srbs_allocated;
384
385 /* DMA Memory Block */
386 void *queues;
387 dma_addr_t queues_dma;
388 unsigned long queues_len;
389
390#define MEM_ALIGN_VALUE \
391 ((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
392 sizeof(struct queue_entry))
393 /* request and response queue variables */
394 dma_addr_t request_dma;
395 struct queue_entry *request_ring;
396 struct queue_entry *request_ptr;
397 dma_addr_t response_dma;
398 struct queue_entry *response_ring;
399 struct queue_entry *response_ptr;
400 dma_addr_t shadow_regs_dma;
401 struct shadow_regs *shadow_regs;
402 uint16_t request_in; /* Current indexes. */
403 uint16_t request_out;
404 uint16_t response_in;
405 uint16_t response_out;
406
407 /* aen queue variables */
408 uint16_t aen_q_count; /* Number of available aen_q entries */
409 uint16_t aen_in; /* Current indexes */
410 uint16_t aen_out;
411 struct aen aen_q[MAX_AEN_ENTRIES];
412
413 /* This mutex prevents several threads from issuing mailbox
414 * commands to the firmware concurrently.
415 */
416 struct mutex mbox_sem;
417 wait_queue_head_t mailbox_wait_queue;
418
419 /* temporary mailbox status registers */
420 volatile uint8_t mbox_status_count;
421 volatile uint32_t mbox_status[MBOX_REG_COUNT];
422
423 /* local device database list (contains internal ddb entries) */
424 struct list_head ddb_list;
425
426 /* Map ddb_list entry by FW ddb index */
427 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
428
429};
430
431static inline int is_qla4010(struct scsi_qla_host *ha)
432{
433 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
434}
435
436static inline int is_qla4022(struct scsi_qla_host *ha)
437{
438 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
439}
440
441static inline int adapter_up(struct scsi_qla_host *ha)
442{
443 return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
444 (test_bit(AF_LINK_UP, &ha->flags) != 0);
445}
446
447static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
448{
449 return (struct scsi_qla_host *)shost->hostdata;
450}
451
452static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
453{
454 return (is_qla4022(ha) ?
455 &ha->reg->u1.isp4022.semaphore :
456 &ha->reg->u1.isp4010.nvram);
457}
458
459static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
460{
461 return (is_qla4022(ha) ?
462 &ha->reg->u1.isp4022.nvram :
463 &ha->reg->u1.isp4010.nvram);
464}
465
466static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
467{
468 return (is_qla4022(ha) ?
469 &ha->reg->u2.isp4022.p0.ext_hw_conf :
470 &ha->reg->u2.isp4010.ext_hw_conf);
471}
472
473static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
474{
475 return (is_qla4022(ha) ?
476 &ha->reg->u2.isp4022.p0.port_status :
477 &ha->reg->u2.isp4010.port_status);
478}
479
480static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
481{
482 return (is_qla4022(ha) ?
483 &ha->reg->u2.isp4022.p0.port_ctrl :
484 &ha->reg->u2.isp4010.port_ctrl);
485}
486
487static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
488{
489 return (is_qla4022(ha) ?
490 &ha->reg->u2.isp4022.p0.port_err_status :
491 &ha->reg->u2.isp4010.port_err_status);
492}
493
494static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
495{
496 return (is_qla4022(ha) ?
497 &ha->reg->u2.isp4022.p0.gp_out :
498 &ha->reg->u2.isp4010.gp_out);
499}
500
501static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
502{
503 return (is_qla4022(ha) ?
504 offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
505 offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
506}
507
508int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
509void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask);
510int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
511
512static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
513{
514 if (is_qla4022(a))
515 return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
516 (QL4022_RESOURCE_BITS_BASE_CODE |
517 (a->mac_index)) << 13);
518 else
519 return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
520 QL4010_FLASH_SEM_BITS);
521}
522
523static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
524{
525 if (is_qla4022(a))
526 ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
527 else
528 ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
529}
530
531static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
532{
533 if (is_qla4022(a))
534 return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
535 (QL4022_RESOURCE_BITS_BASE_CODE |
536 (a->mac_index)) << 10);
537 else
538 return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
539 QL4010_NVRAM_SEM_BITS);
540}
541
542static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
543{
544 if (is_qla4022(a))
545 ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
546 else
547 ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
548}
549
550static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
551{
552 if (is_qla4022(a))
553 return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
554 (QL4022_RESOURCE_BITS_BASE_CODE |
555 (a->mac_index)) << 1);
556 else
557 return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
558 QL4010_DRVR_SEM_BITS);
559}
560
561static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
562{
563 if (is_qla4022(a))
564 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
565 else
566 ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
567}
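/*
 * Typical usage sketch (illustrative, assuming the QLA_SUCCESS /
 * QLA_ERROR convention defined at the top of this header):
 *
 *	if (ql4xxx_lock_flash(ha) == QLA_SUCCESS) {
 *		... access flash registers ...
 *		ql4xxx_unlock_flash(ha);
 *	}
 */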
568
569/*---------------------------------------------------------------------------*/
570
571/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
572#define PRESERVE_DDB_LIST 0
573#define REBUILD_DDB_LIST 1
574
575/* Defines for process_aen() */
576#define PROCESS_ALL_AENS 0
577#define FLUSH_DDB_CHANGED_AENS 1
578#define RELOGIN_DDB_CHANGED_AENS 2
579
580#include "ql4_version.h"
581#include "ql4_glbl.h"
582#include "ql4_dbg.h"
583#include "ql4_inline.h"
584
585
586#endif /* __QL4_DEF_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
new file mode 100644
index 00000000000..427489de64b
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -0,0 +1,843 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef _QLA4X_FW_H
9#define _QLA4X_FW_H
10
11
12#define MAX_PRST_DEV_DB_ENTRIES 64
13#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
14#define MAX_DEV_DB_ENTRIES 512
15
16/*************************************************************************
17 *
18 * ISP 4010 I/O Register Set Structure and Definitions
19 *
20 *************************************************************************/
21
22struct port_ctrl_stat_regs {
23 __le32 ext_hw_conf; /* 80 x50 R/W */
24 __le32 intChipConfiguration; /* 84 x54 */
25 __le32 port_ctrl; /* 88 x58 */
26 __le32 port_status; /* 92 x5c */
27 __le32 HostPrimMACHi; /* 96 x60 */
28 __le32 HostPrimMACLow; /* 100 x64 */
29 __le32 HostSecMACHi; /* 104 x68 */
30 __le32 HostSecMACLow; /* 108 x6c */
31 __le32 EPPrimMACHi; /* 112 x70 */
32 __le32 EPPrimMACLow; /* 116 x74 */
33 __le32 EPSecMACHi; /* 120 x78 */
34 __le32 EPSecMACLow; /* 124 x7c */
35 __le32 HostPrimIPHi; /* 128 x80 */
36 __le32 HostPrimIPMidHi; /* 132 x84 */
37 __le32 HostPrimIPMidLow; /* 136 x88 */
38 __le32 HostPrimIPLow; /* 140 x8c */
39 __le32 HostSecIPHi; /* 144 x90 */
40 __le32 HostSecIPMidHi; /* 148 x94 */
41 __le32 HostSecIPMidLow; /* 152 x98 */
42 __le32 HostSecIPLow; /* 156 x9c */
43 __le32 EPPrimIPHi; /* 160 xa0 */
44 __le32 EPPrimIPMidHi; /* 164 xa4 */
45 __le32 EPPrimIPMidLow; /* 168 xa8 */
46 __le32 EPPrimIPLow; /* 172 xac */
47 __le32 EPSecIPHi; /* 176 xb0 */
48 __le32 EPSecIPMidHi; /* 180 xb4 */
49 __le32 EPSecIPMidLow; /* 184 xb8 */
50 __le32 EPSecIPLow; /* 188 xbc */
51 __le32 IPReassemblyTimeout; /* 192 xc0 */
52 __le32 EthMaxFramePayload; /* 196 xc4 */
53 __le32 TCPMaxWindowSize; /* 200 xc8 */
54 __le32 TCPCurrentTimestampHi; /* 204 xcc */
55 __le32 TCPCurrentTimestampLow; /* 208 xd0 */
56 __le32 LocalRAMAddress; /* 212 xd4 */
57 __le32 LocalRAMData; /* 216 xd8 */
58 __le32 PCSReserved1; /* 220 xdc */
59 __le32 gp_out; /* 224 xe0 */
60 __le32 gp_in; /* 228 xe4 */
61 __le32 ProbeMuxAddr; /* 232 xe8 */
62 __le32 ProbeMuxData; /* 236 xec */
63 __le32 ERMQueueBaseAddr0; /* 240 xf0 */
64 __le32 ERMQueueBaseAddr1; /* 244 xf4 */
65 __le32 MACConfiguration; /* 248 xf8 */
66 __le32 port_err_status; /* 252 xfc COR */
67};
68
69struct host_mem_cfg_regs {
70 __le32 NetRequestQueueOut; /* 80 x50 */
71 __le32 NetRequestQueueOutAddrHi; /* 84 x54 */
72 __le32 NetRequestQueueOutAddrLow; /* 88 x58 */
73 __le32 NetRequestQueueBaseAddrHi; /* 92 x5c */
74 __le32 NetRequestQueueBaseAddrLow; /* 96 x60 */
75 __le32 NetRequestQueueLength; /* 100 x64 */
76 __le32 NetResponseQueueIn; /* 104 x68 */
77 __le32 NetResponseQueueInAddrHi; /* 108 x6c */
78 __le32 NetResponseQueueInAddrLow; /* 112 x70 */
79 __le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
80 __le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
81 __le32 NetResponseQueueLength; /* 124 x7c */
82 __le32 req_q_out; /* 128 x80 */
83 __le32 RequestQueueOutAddrHi; /* 132 x84 */
84 __le32 RequestQueueOutAddrLow; /* 136 x88 */
85 __le32 RequestQueueBaseAddrHi; /* 140 x8c */
86 __le32 RequestQueueBaseAddrLow; /* 144 x90 */
87 __le32 RequestQueueLength; /* 148 x94 */
88 __le32 ResponseQueueIn; /* 152 x98 */
89 __le32 ResponseQueueInAddrHi; /* 156 x9c */
90 __le32 ResponseQueueInAddrLow; /* 160 xa0 */
91 __le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
92 __le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
93 __le32 ResponseQueueLength; /* 172 xac */
94 __le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
95 __le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
96 __le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
97 __le32 NetRxLargeBufferQueueLength; /* 188 xbc */
98 __le32 NetRxLargeBufferLength; /* 192 xc0 */
99 __le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
100 __le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
101 __le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
102 __le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
103 __le32 NetRxSmallBufferLength; /* 212 xd4 */
104 __le32 HMCReserved0[10]; /* 216 xd8 */
105};
106
107struct local_ram_cfg_regs {
108 __le32 BufletSize; /* 80 x50 */
109 __le32 BufletMaxCount; /* 84 x54 */
110 __le32 BufletCurrCount; /* 88 x58 */
111 __le32 BufletPauseThresholdCount; /* 92 x5c */
112 __le32 BufletTCPWinThresholdHi; /* 96 x60 */
113 __le32 BufletTCPWinThresholdLow; /* 100 x64 */
114 __le32 IPHashTableBaseAddr; /* 104 x68 */
115 __le32 IPHashTableSize; /* 108 x6c */
116 __le32 TCPHashTableBaseAddr; /* 112 x70 */
117 __le32 TCPHashTableSize; /* 116 x74 */
118 __le32 NCBAreaBaseAddr; /* 120 x78 */
119 __le32 NCBMaxCount; /* 124 x7c */
120 __le32 NCBCurrCount; /* 128 x80 */
121 __le32 DRBAreaBaseAddr; /* 132 x84 */
122 __le32 DRBMaxCount; /* 136 x88 */
123 __le32 DRBCurrCount; /* 140 x8c */
124 __le32 LRCReserved[28]; /* 144 x90 */
125};
126
127struct prot_stat_regs {
128 __le32 MACTxFrameCount; /* 80 x50 R */
129 __le32 MACTxByteCount; /* 84 x54 R */
130 __le32 MACRxFrameCount; /* 88 x58 R */
131 __le32 MACRxByteCount; /* 92 x5c R */
132 __le32 MACCRCErrCount; /* 96 x60 R */
133 __le32 MACEncErrCount; /* 100 x64 R */
134 __le32 MACRxLengthErrCount; /* 104 x68 R */
135 __le32 IPTxPacketCount; /* 108 x6c R */
136 __le32 IPTxByteCount; /* 112 x70 R */
137 __le32 IPTxFragmentCount; /* 116 x74 R */
138 __le32 IPRxPacketCount; /* 120 x78 R */
139 __le32 IPRxByteCount; /* 124 x7c R */
140 __le32 IPRxFragmentCount; /* 128 x80 R */
141 __le32 IPDatagramReassemblyCount; /* 132 x84 R */
142 __le32 IPV6RxPacketCount; /* 136 x88 R */
143 __le32 IPErrPacketCount; /* 140 x8c R */
144 __le32 IPReassemblyErrCount; /* 144 x90 R */
145 __le32 TCPTxSegmentCount; /* 148 x94 R */
146 __le32 TCPTxByteCount; /* 152 x98 R */
147 __le32 TCPRxSegmentCount; /* 156 x9c R */
148 __le32 TCPRxByteCount; /* 160 xa0 R */
149 __le32 TCPTimerExpCount; /* 164 xa4 R */
150 __le32 TCPRxAckCount; /* 168 xa8 R */
151 __le32 TCPTxAckCount; /* 172 xac R */
152 __le32 TCPRxErrOOOCount; /* 176 xb0 R */
153 __le32 PSReserved0; /* 180 xb4 */
154 __le32 TCPRxWindowProbeUpdateCount; /* 184 xb8 R */
155 __le32 ECCErrCorrectionCount; /* 188 xbc R */
156 __le32 PSReserved1[16]; /* 192 xc0 */
157};
158
159
160/* remote register set (access via PCI memory read/write) */
161struct isp_reg {
162#define MBOX_REG_COUNT 8
163 __le32 mailbox[MBOX_REG_COUNT];
164
165 __le32 flash_address; /* 0x20 */
166 __le32 flash_data;
167 __le32 ctrl_status;
168
169 union {
170 struct {
171 __le32 nvram;
172 __le32 reserved1[2]; /* 0x30 */
173 } __attribute__ ((packed)) isp4010;
174 struct {
175 __le32 intr_mask;
176 __le32 nvram; /* 0x30 */
177 __le32 semaphore;
178 } __attribute__ ((packed)) isp4022;
179 } u1;
180
181 __le32 req_q_in; /* SCSI Request Queue Producer Index */
182 __le32 rsp_q_out; /* SCSI Completion Queue Consumer Index */
183
184 __le32 reserved2[4]; /* 0x40 */
185
186 union {
187 struct {
188 __le32 ext_hw_conf; /* 0x50 */
189 __le32 flow_ctrl;
190 __le32 port_ctrl;
191 __le32 port_status;
192
193 __le32 reserved3[8]; /* 0x60 */
194
195 __le32 req_q_out; /* 0x80 */
196
197 __le32 reserved4[23]; /* 0x84 */
198
199 __le32 gp_out; /* 0xe0 */
200 __le32 gp_in;
201
202 __le32 reserved5[5];
203
204 __le32 port_err_status; /* 0xfc */
205 } __attribute__ ((packed)) isp4010;
206 struct {
207 union {
208 struct port_ctrl_stat_regs p0;
209 struct host_mem_cfg_regs p1;
210 struct local_ram_cfg_regs p2;
211 struct prot_stat_regs p3;
212 __le32 r_union[44];
213 };
214
215 } __attribute__ ((packed)) isp4022;
216 } u2;
217}; /* 256 x100 */
218
219
220/* Semaphore Defines for 4010 */
221#define QL4010_DRVR_SEM_BITS 0x00000030
222#define QL4010_GPIO_SEM_BITS 0x000000c0
223#define QL4010_SDRAM_SEM_BITS 0x00000300
224#define QL4010_PHY_SEM_BITS 0x00000c00
225#define QL4010_NVRAM_SEM_BITS 0x00003000
226#define QL4010_FLASH_SEM_BITS 0x0000c000
227
228#define QL4010_DRVR_SEM_MASK 0x00300000
229#define QL4010_GPIO_SEM_MASK 0x00c00000
230#define QL4010_SDRAM_SEM_MASK 0x03000000
231#define QL4010_PHY_SEM_MASK 0x0c000000
232#define QL4010_NVRAM_SEM_MASK 0x30000000
233#define QL4010_FLASH_SEM_MASK 0xc0000000
234
235/* Semaphore Defines for 4022 */
236#define QL4022_RESOURCE_MASK_BASE_CODE 0x7
237#define QL4022_RESOURCE_BITS_BASE_CODE 0x4
238
239
240#define QL4022_DRVR_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (1+16))
241#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16))
242#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16))
243#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
244#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
245
246
247
248/* Page # defines for 4022 */
249#define PORT_CTRL_STAT_PAGE 0 /* 4022 */
250#define HOST_MEM_CFG_PAGE 1 /* 4022 */
251#define LOCAL_RAM_CFG_PAGE 2 /* 4022 */
252#define PROT_STAT_PAGE 3 /* 4022 */
253
254/* Register Mask - sets corresponding mask bits in the upper word */
255static inline uint32_t set_rmask(uint32_t val)
256{
257 return (val & 0xffff) | (val << 16);
258}
259
260
261static inline uint32_t clr_rmask(uint32_t val)
262{
263 return 0 | (val << 16);
264}
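/*
 * Example (illustrative): the mask bits in the upper half-word let a
 * single bit be set or cleared without a read-modify-write of the
 * register, e.g. to pulse the soft-reset bit in ctrl_status:
 *
 *	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
 *	writel(clr_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
 */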
265
266/* ctrl_status definitions */
267#define CSR_SCSI_PAGE_SELECT 0x00000003
268#define CSR_SCSI_INTR_ENABLE 0x00000004 /* 4010 */
269#define CSR_SCSI_RESET_INTR 0x00000008
270#define CSR_SCSI_COMPLETION_INTR 0x00000010
271#define CSR_SCSI_PROCESSOR_INTR 0x00000020
272#define CSR_INTR_RISC 0x00000040
273#define CSR_BOOT_ENABLE 0x00000080
274#define CSR_NET_PAGE_SELECT 0x00000300 /* 4010 */
275#define CSR_FUNC_NUM 0x00000700 /* 4022 */
276#define CSR_NET_RESET_INTR 0x00000800 /* 4010 */
277#define CSR_FORCE_SOFT_RESET 0x00002000 /* 4022 */
278#define CSR_FATAL_ERROR 0x00004000
279#define CSR_SOFT_RESET 0x00008000
280#define ISP_CONTROL_FN_MASK CSR_FUNC_NUM
281#define ISP_CONTROL_FN0_SCSI 0x0500
282#define ISP_CONTROL_FN1_SCSI 0x0700
283
284#define INTR_PENDING (CSR_SCSI_COMPLETION_INTR |\
285 CSR_SCSI_PROCESSOR_INTR |\
286 CSR_SCSI_RESET_INTR)
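/*
 * Sketch (illustrative) of how an interrupt handler would use this mask:
 *
 *	uint32_t intr_status = readl(&ha->reg->ctrl_status);
 *	if (intr_status & INTR_PENDING)
 *		... service completion / processor / reset interrupt ...
 */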
287
288/* ISP InterruptMask definitions */
289#define IMR_SCSI_INTR_ENABLE 0x00000004 /* 4022 */
290
291/* ISP 4022 nvram definitions */
292#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */
293
294/* ISP port_status definitions */
295
296/* ISP Semaphore definitions */
297
298/* ISP General Purpose Output definitions */
299#define GPOR_TOPCAT_RESET 0x00000004
300
301/* shadow registers (DMA'd from HA to system memory. read only) */
302struct shadow_regs {
303 /* SCSI Request Queue Consumer Index */
304 __le32 req_q_out; /* 0 x0 R */
305
306 /* SCSI Completion Queue Producer Index */
307 __le32 rsp_q_in; /* 4 x4 R */
308}; /* 8 x8 */
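/*
 * Example (illustrative): because the firmware DMAs these indexes into
 * host memory, the driver reads them there instead of touching the
 * chip, e.g.
 *
 *	uint32_t rsp_in = le32_to_cpu(ha->shadow_regs->rsp_q_in);
 *	while (ha->response_out != rsp_in)
 *		... process one response queue entry ...
 */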
309
310
311/* External hardware configuration register */
312union external_hw_config_reg {
313 struct {
314 /* FIXME: Do we even need this? All values are
315 * referred to by 16 bit quantities. Platform and
316 * endianness issues. */
317 __le32 bReserved0:1;
318 __le32 bSDRAMProtectionMethod:2;
319 __le32 bSDRAMBanks:1;
320 __le32 bSDRAMChipWidth:1;
321 __le32 bSDRAMChipSize:2;
322 __le32 bParityDisable:1;
323 __le32 bExternalMemoryType:1;
324 __le32 bFlashBIOSWriteEnable:1;
325 __le32 bFlashUpperBankSelect:1;
326 __le32 bWriteBurst:2;
327 __le32 bReserved1:3;
328 __le32 bMask:16;
329 };
330 uint32_t Asuint32_t;
331};
332
333/*************************************************************************
334 *
335 * Mailbox Commands Structures and Definitions
336 *
337 *************************************************************************/
338
339/* Mailbox command definitions */
340#define MBOX_CMD_ABOUT_FW 0x0009
341#define MBOX_CMD_LUN_RESET 0x0016
342#define MBOX_CMD_GET_FW_STATUS 0x001F
343#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
344#define ISNS_DISABLE 0
345#define ISNS_ENABLE 1
346#define MBOX_CMD_READ_FLASH 0x0026
347#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
348#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
349#define LOGOUT_OPTION_CLOSE_SESSION 0x01
350#define LOGOUT_OPTION_RELOGIN 0x02
351#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
352#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
353#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
354#define MBOX_CMD_REQUEST_DATABASE_ENTRY 0x0062
355#define MBOX_CMD_SET_DATABASE_ENTRY 0x0063
356#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064
357#define DDB_DS_UNASSIGNED 0x00
358#define DDB_DS_NO_CONNECTION_ACTIVE 0x01
359#define DDB_DS_SESSION_ACTIVE 0x04
360#define DDB_DS_SESSION_FAILED 0x06
361#define DDB_DS_LOGIN_IN_PROCESS 0x07
362#define MBOX_CMD_GET_FW_STATE 0x0069
363
364/* Mailbox 1 */
365#define FW_STATE_READY 0x0000
366#define FW_STATE_CONFIG_WAIT 0x0001
367#define FW_STATE_ERROR 0x0004
368#define FW_STATE_DHCP_IN_PROGRESS 0x0008
369
370/* Mailbox 3 */
371#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
372#define FW_ADDSTATE_DHCP_ENABLED 0x0002
373#define FW_ADDSTATE_LINK_UP 0x0010
374#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
375#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
376#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074
377#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
378#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
379
380/* Mailbox status definitions */
381#define MBOX_COMPLETION_STATUS 4
382#define MBOX_STS_BUSY 0x0007
383#define MBOX_STS_INTERMEDIATE_COMPLETION 0x1000
384#define MBOX_STS_COMMAND_COMPLETE 0x4000
385#define MBOX_STS_COMMAND_ERROR 0x4005
386
387#define MBOX_ASYNC_EVENT_STATUS 8
388#define MBOX_ASTS_SYSTEM_ERROR 0x8002
389#define MBOX_ASTS_REQUEST_TRANSFER_ERROR 0x8003
390#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR 0x8004
391#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM 0x8005
392#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED 0x8006
393#define MBOX_ASTS_LINK_UP 0x8010
394#define MBOX_ASTS_LINK_DOWN 0x8011
395#define MBOX_ASTS_DATABASE_CHANGED 0x8014
396#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED 0x8015
397#define MBOX_ASTS_SELF_TEST_FAILED 0x8016
398#define MBOX_ASTS_LOGIN_FAILED 0x8017
399#define MBOX_ASTS_DNS 0x8018
400#define MBOX_ASTS_HEARTBEAT 0x8019
401#define MBOX_ASTS_NVRAM_INVALID 0x801A
402#define MBOX_ASTS_MAC_ADDRESS_CHANGED 0x801B
403#define MBOX_ASTS_IP_ADDRESS_CHANGED 0x801C
404#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
405#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
406#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
407#define ISNS_EVENT_DATA_RECEIVED 0x0000
408#define ISNS_EVENT_CONNECTION_OPENED 0x0001
409#define ISNS_EVENT_CONNECTION_FAILED 0x0002
410#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
411#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
412
413/*************************************************************************/
414
415/* Host Adapter Initialization Control Block (from host) */
416struct init_fw_ctrl_blk {
417 uint8_t Version; /* 00 */
418 uint8_t Control; /* 01 */
419
420 uint16_t FwOptions; /* 02-03 */
421#define FWOPT_HEARTBEAT_ENABLE 0x1000
422#define FWOPT_SESSION_MODE 0x0040
423#define FWOPT_INITIATOR_MODE 0x0020
424#define FWOPT_TARGET_MODE 0x0010
425
426 uint16_t ExecThrottle; /* 04-05 */
427 uint8_t RetryCount; /* 06 */
428 uint8_t RetryDelay; /* 07 */
429 uint16_t MaxEthFrPayloadSize; /* 08-09 */
430 uint16_t AddFwOptions; /* 0A-0B */
431
432 uint8_t HeartbeatInterval; /* 0C */
433 uint8_t InstanceNumber; /* 0D */
434 uint16_t RES2; /* 0E-0F */
435 uint16_t ReqQConsumerIndex; /* 10-11 */
436 uint16_t ComplQProducerIndex; /* 12-13 */
437 uint16_t ReqQLen; /* 14-15 */
438 uint16_t ComplQLen; /* 16-17 */
439 uint32_t ReqQAddrLo; /* 18-1B */
440 uint32_t ReqQAddrHi; /* 1C-1F */
441 uint32_t ComplQAddrLo; /* 20-23 */
442 uint32_t ComplQAddrHi; /* 24-27 */
443 uint32_t ShadowRegBufAddrLo; /* 28-2B */
444 uint32_t ShadowRegBufAddrHi; /* 2C-2F */
445
446 uint16_t iSCSIOptions; /* 30-31 */
447
448 uint16_t TCPOptions; /* 32-33 */
449
450 uint16_t IPOptions; /* 34-35 */
451
452 uint16_t MaxPDUSize; /* 36-37 */
453 uint16_t RcvMarkerInt; /* 38-39 */
454 uint16_t SndMarkerInt; /* 3A-3B */
455 uint16_t InitMarkerlessInt; /* 3C-3D */
456 uint16_t FirstBurstSize; /* 3E-3F */
457 uint16_t DefaultTime2Wait; /* 40-41 */
458 uint16_t DefaultTime2Retain; /* 42-43 */
459 uint16_t MaxOutStndngR2T; /* 44-45 */
460 uint16_t KeepAliveTimeout; /* 46-47 */
461 uint16_t PortNumber; /* 48-49 */
462 uint16_t MaxBurstSize; /* 4A-4B */
463 uint32_t RES4; /* 4C-4F */
464 uint8_t IPAddr[4]; /* 50-53 */
465 uint8_t RES5[12]; /* 54-5F */
466 uint8_t SubnetMask[4]; /* 60-63 */
467 uint8_t RES6[12]; /* 64-6F */
468 uint8_t GatewayIPAddr[4]; /* 70-73 */
469 uint8_t RES7[12]; /* 74-7F */
470 uint8_t PriDNSIPAddr[4]; /* 80-83 */
471 uint8_t SecDNSIPAddr[4]; /* 84-87 */
472 uint8_t RES8[8]; /* 88-8F */
473 uint8_t Alias[32]; /* 90-AF */
474 uint8_t TargAddr[8]; /* B0-B7 *//* /FIXME: Remove?? */
475 uint8_t CHAPNameSecretsTable[8]; /* B8-BF */
476 uint8_t EthernetMACAddr[6]; /* C0-C5 */
477 uint16_t TargetPortalGroup; /* C6-C7 */
478 uint8_t SendScale; /* C8 */
479 uint8_t RecvScale; /* C9 */
480 uint8_t TypeOfService; /* CA */
481 uint8_t Time2Live; /* CB */
482 uint16_t VLANPriority; /* CC-CD */
483 uint16_t Reserved8; /* CE-CF */
484 uint8_t SecIPAddr[4]; /* D0-D3 */
485 uint8_t Reserved9[12]; /* D4-DF */
486 uint8_t iSNSIPAddr[4]; /* E0-E3 */
487 uint16_t iSNSServerPortNumber; /* E4-E5 */
488 uint8_t Reserved10[10]; /* E6-EF */
489 uint8_t SLPDAIPAddr[4]; /* F0-F3 */
490 uint8_t Reserved11[12]; /* F4-FF */
491 uint8_t iSCSINameString[256]; /* 100-1FF */
492};
493
494/*************************************************************************/
495
496struct dev_db_entry {
497 uint8_t options; /* 00 */
498#define DDB_OPT_DISC_SESSION 0x10
499#define DDB_OPT_TARGET 0x02 /* device is a target */
500
501 uint8_t control; /* 01 */
502
503 uint16_t exeThrottle; /* 02-03 */
504 uint16_t exeCount; /* 04-05 */
505 uint8_t retryCount; /* 06 */
506 uint8_t retryDelay; /* 07 */
507 uint16_t iSCSIOptions; /* 08-09 */
508
509 uint16_t TCPOptions; /* 0A-0B */
510
511 uint16_t IPOptions; /* 0C-0D */
512
513 uint16_t maxPDUSize; /* 0E-0F */
514 uint16_t rcvMarkerInt; /* 10-11 */
515 uint16_t sndMarkerInt; /* 12-13 */
516 uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
517 uint16_t firstBurstSize; /* 16-17 */
518 uint16_t minTime2Wait; /* 18-19 : RA :default_time2wait */
519 uint16_t maxTime2Retain; /* 1A-1B */
520 uint16_t maxOutstndngR2T; /* 1C-1D */
521 uint16_t keepAliveTimeout; /* 1E-1F */
522 uint8_t ISID[6]; /* 20-25 big-endian, must be converted
523 * to little-endian */
524 uint16_t TSID; /* 26-27 */
525 uint16_t portNumber; /* 28-29 */
526 uint16_t maxBurstSize; /* 2A-2B */
527 uint16_t taskMngmntTimeout; /* 2C-2D */
528 uint16_t reserved1; /* 2E-2F */
529 uint8_t ipAddr[0x10]; /* 30-3F */
530 uint8_t iSCSIAlias[0x20]; /* 40-5F */
531 uint8_t targetAddr[0x20]; /* 60-7F */
532 uint8_t userID[0x20]; /* 80-9F */
533 uint8_t password[0x20]; /* A0-BF */
534 uint8_t iscsiName[0x100]; /* C0-1BF : xxzzy Make this a
535 * pointer to a string so we
536 * don't have to reserve soooo
537 * much RAM */
538 uint16_t ddbLink; /* 1C0-1C1 */
539 uint16_t CHAPTableIndex; /* 1C2-1C3 */
540 uint16_t TargetPortalGroup; /* 1C4-1C5 */
541 uint16_t reserved2[2]; /* 1C6-1C7 */
542 uint32_t statSN; /* 1C8-1CB */
543 uint32_t expStatSN; /* 1CC-1CF */
544 uint16_t reserved3[0x2C]; /* 1D0-1FB */
545 uint16_t ddbValidCookie; /* 1FC-1FD */
546 uint16_t ddbValidSize; /* 1FE-1FF */
547};
548
549/*************************************************************************/
550
551/* Flash definitions */
552
553#define FLASH_OFFSET_SYS_INFO 0x02000000
554#define FLASH_DEFAULTBLOCKSIZE 0x20000
555#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
556 * for EOF
557 * signature */
558
559struct sys_info_phys_addr {
560 uint8_t address[6]; /* 00-05 */
561 uint8_t filler[2]; /* 06-07 */
562};
563
564struct flash_sys_info {
565 uint32_t cookie; /* 00-03 */
566 uint32_t physAddrCount; /* 04-07 */
567 struct sys_info_phys_addr physAddr[4]; /* 08-27 */
568 uint8_t vendorId[128]; /* 28-A7 */
569 uint8_t productId[128]; /* A8-127 */
570 uint32_t serialNumber; /* 128-12B */
571
572 /* PCI Configuration values */
573 uint32_t pciDeviceVendor; /* 12C-12F */
574 uint32_t pciDeviceId; /* 130-133 */
575 uint32_t pciSubsysVendor; /* 134-137 */
576 uint32_t pciSubsysId; /* 138-13B */
577
578 /* This validates version 1. */
579 uint32_t crumbs; /* 13C-13F */
580
581 uint32_t enterpriseNumber; /* 140-143 */
582
583 uint32_t mtu; /* 144-147 */
584 uint32_t reserved0; /* 148-14b */
585 uint32_t crumbs2; /* 14c-14f */
586 uint8_t acSerialNumber[16]; /* 150-15f */
587 uint32_t crumbs3; /* 160-16f */
588
589 /* Leave this last in the struct so it is declared invalid if
590 * any new items are added.
591 */
592 uint32_t reserved1[39]; /* 170-1ff */
593}; /* 200 */
594
595struct crash_record {
596 uint16_t fw_major_version; /* 00 - 01 */
597 uint16_t fw_minor_version; /* 02 - 03 */
598 uint16_t fw_patch_version; /* 04 - 05 */
599 uint16_t fw_build_version; /* 06 - 07 */
600
601 uint8_t build_date[16]; /* 08 - 17 */
602 uint8_t build_time[16]; /* 18 - 27 */
603 uint8_t build_user[16]; /* 28 - 37 */
604 uint8_t card_serial_num[16]; /* 38 - 47 */
605
606 uint32_t time_of_crash_in_secs; /* 48 - 4B */
607 uint32_t time_of_crash_in_ms; /* 4C - 4F */
608
609 uint16_t out_RISC_sd_num_frames; /* 50 - 51 */
610 uint16_t OAP_sd_num_words; /* 52 - 53 */
611 uint16_t IAP_sd_num_frames; /* 54 - 55 */
612 uint16_t in_RISC_sd_num_words; /* 56 - 57 */
613
614 uint8_t reserved1[28]; /* 58 - 7F */
615
616 uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
617 uint8_t in_RISC_reg_dump[256]; /*180 -27F */
618 uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */
619};
620
621struct conn_event_log_entry {
622#define MAX_CONN_EVENT_LOG_ENTRIES 100
623 uint32_t timestamp_sec; /* 00 - 03 seconds since boot */
624 uint32_t timestamp_ms; /* 04 - 07 milliseconds since boot */
625 uint16_t device_index; /* 08 - 09 */
626 uint16_t fw_conn_state; /* 0A - 0B */
627 uint8_t event_type; /* 0C - 0C */
628 uint8_t error_code; /* 0D - 0D */
629 uint16_t error_code_detail; /* 0E - 0F */
630 uint8_t num_consecutive_events; /* 10 - 10 */
631 uint8_t rsvd[3]; /* 11 - 13 */
632};
633
634/*************************************************************************
635 *
636 * IOCB Commands Structures and Definitions
637 *
638 *************************************************************************/
639#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CDB */
640#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */
641
642/* IOCB header structure */
643struct qla4_header {
644 uint8_t entryType;
645#define ET_STATUS 0x03
646#define ET_MARKER 0x04
647#define ET_CONT_T1 0x0A
648#define ET_STATUS_CONTINUATION 0x10
649#define ET_CMND_T3 0x19
650#define ET_PASSTHRU0 0x3A
651#define ET_PASSTHRU_STATUS 0x3C
652
653 uint8_t entryStatus;
654 uint8_t systemDefined;
655 uint8_t entryCount;
656
657 /* SystemDefined definition */
658};
659
660/* Generic queue entry structure*/
661struct queue_entry {
662 uint8_t data[60];
663 uint32_t signature;
664
665};
666
667/* 64 bit addressing segment counts*/
668
669#define COMMAND_SEG_A64 1
670#define CONTINUE_SEG_A64 5
671
672/* 64 bit addressing segment definition*/
673
674struct data_seg_a64 {
675 struct {
676 uint32_t addrLow;
677 uint32_t addrHigh;
678
679 } base;
680
681 uint32_t count;
682
683};
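/*
 * Example (illustrative): a scatter-gather element would be loaded into
 * one of these segments roughly as follows:
 *
 *	dataseg->base.addrLow  = cpu_to_le32(LSDW(sg_dma_address(sg)));
 *	dataseg->base.addrHigh = cpu_to_le32(MSDW(sg_dma_address(sg)));
 *	dataseg->count         = cpu_to_le32(sg_dma_len(sg));
 */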
684
685/* Command Type 3 entry structure*/
686
687struct command_t3_entry {
688 struct qla4_header hdr; /* 00-03 */
689
690 uint32_t handle; /* 04-07 */
691 uint16_t target; /* 08-09 */
692 uint16_t connection_id; /* 0A-0B */
693
694 uint8_t control_flags; /* 0C */
695
696 /* data direction (bits 5-6) */
697#define CF_WRITE 0x20
698#define CF_READ 0x40
699#define CF_NO_DATA 0x00
700
701 /* task attributes (bits 2-0) */
702#define CF_HEAD_TAG 0x03
703#define CF_ORDERED_TAG 0x02
704#define CF_SIMPLE_TAG 0x01
705
706 /* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS
707 * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS
708 * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET
709 * PROPERLY.
710 */
711 uint8_t state_flags; /* 0D */
712 uint8_t cmdRefNum; /* 0E */
713 uint8_t reserved1; /* 0F */
714 uint8_t cdb[IOCB_MAX_CDB_LEN]; /* 10-1F */
715 struct scsi_lun lun; /* FCP LUN (BE). */
716 uint32_t cmdSeqNum; /* 28-2B */
717 uint16_t timeout; /* 2C-2D */
718 uint16_t dataSegCnt; /* 2E-2F */
719 uint32_t ttlByteCnt; /* 30-33 */
720 struct data_seg_a64 dataseg[COMMAND_SEG_A64]; /* 34-3F */
721
722};
723
724
725/* Continuation Type 1 entry structure*/
726struct continuation_t1_entry {
727 struct qla4_header hdr;
728
729 struct data_seg_a64 dataseg[CONTINUE_SEG_A64];
730
731};
732
733/* Parameterize for 64 or 32 bits */
734#define COMMAND_SEG COMMAND_SEG_A64
735#define CONTINUE_SEG CONTINUE_SEG_A64
736
737#define ET_COMMAND ET_CMND_T3
738#define ET_CONTINUE ET_CONT_T1
739
740/* Marker entry structure*/
741struct marker_entry {
742 struct qla4_header hdr; /* 00-03 */
743
744 uint32_t system_defined; /* 04-07 */
745 uint16_t target; /* 08-09 */
746 uint16_t modifier; /* 0A-0B */
747#define MM_LUN_RESET 0
748
749 uint16_t flags; /* 0C-0D */
750 uint16_t reserved1; /* 0E-0F */
751 struct scsi_lun lun; /* FCP LUN (BE). */
752 uint64_t reserved2; /* 18-1F */
753 uint64_t reserved3; /* 20-27 */
754 uint64_t reserved4; /* 28-2F */
755 uint64_t reserved5; /* 30-37 */
756 uint64_t reserved6; /* 38-3F */
757};
758
759/* Status entry structure*/
760struct status_entry {
761 struct qla4_header hdr; /* 00-03 */
762
763 uint32_t handle; /* 04-07 */
764
765 uint8_t scsiStatus; /* 08 */
766#define SCSI_CHECK_CONDITION 0x02
767
768 uint8_t iscsiFlags; /* 09 */
769#define ISCSI_FLAG_RESIDUAL_UNDER 0x02
770#define ISCSI_FLAG_RESIDUAL_OVER 0x04
771
772 uint8_t iscsiResponse; /* 0A */
773
774 uint8_t completionStatus; /* 0B */
775#define SCS_COMPLETE 0x00
776#define SCS_INCOMPLETE 0x01
777#define SCS_RESET_OCCURRED 0x04
778#define SCS_ABORTED 0x05
779#define SCS_TIMEOUT 0x06
780#define SCS_DATA_OVERRUN 0x07
781#define SCS_DATA_UNDERRUN 0x15
782#define SCS_QUEUE_FULL 0x1C
783#define SCS_DEVICE_UNAVAILABLE 0x28
784#define SCS_DEVICE_LOGGED_OUT 0x29
785
786 uint8_t reserved1; /* 0C */
787
788 /* state_flags MUST be at the same location as state_flags in
789 * the Command_T3/4_Entry */
790 uint8_t state_flags; /* 0D */
791
792 uint16_t senseDataByteCnt; /* 0E-0F */
793 uint32_t residualByteCnt; /* 10-13 */
794 uint32_t bidiResidualByteCnt; /* 14-17 */
795 uint32_t expSeqNum; /* 18-1B */
796 uint32_t maxCmdSeqNum; /* 1C-1F */
797 uint8_t senseData[IOCB_MAX_SENSEDATA_LEN]; /* 20-3F */
798
799};
800
801struct passthru0 {
802 struct qla4_header hdr; /* 00-03 */
803 uint32_t handle; /* 04-07 */
804 uint16_t target; /* 08-09 */
805 uint16_t connectionID; /* 0A-0B */
806#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000)
807
808 uint16_t controlFlags; /* 0C-0D */
809#define PT_FLAG_ETHERNET_FRAME 0x8000
810#define PT_FLAG_ISNS_PDU 0x8000
811#define PT_FLAG_SEND_BUFFER 0x0200
812#define PT_FLAG_WAIT_4_RESPONSE 0x0100
813
814 uint16_t timeout; /* 0E-0F */
815#define PT_DEFAULT_TIMEOUT 30 /* seconds */
816
817 struct data_seg_a64 outDataSeg64; /* 10-1B */
818 uint32_t res1; /* 1C-1F */
819 struct data_seg_a64 inDataSeg64; /* 20-2B */
820 uint8_t res2[20]; /* 2C-3F */
821};
822
823struct passthru_status {
824 struct qla4_header hdr; /* 00-03 */
825 uint32_t handle; /* 04-07 */
826 uint16_t target; /* 08-09 */
827 uint16_t connectionID; /* 0A-0B */
828
829 uint8_t completionStatus; /* 0C */
830#define PASSTHRU_STATUS_COMPLETE 0x01
831
832 uint8_t residualFlags; /* 0D */
833
834 uint16_t timeout; /* 0E-0F */
835 uint16_t portNumber; /* 10-11 */
836 uint8_t res1[10]; /* 12-1B */
837 uint32_t outResidual; /* 1C-1F */
838 uint8_t res2[12]; /* 20-2B */
839 uint32_t inResidual; /* 2C-2F */
840 uint8_t res4[16]; /* 30-3F */
841};
842
843#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
new file mode 100644
index 00000000000..418fb7a13a6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -0,0 +1,78 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef __QLA4x_GBL_H
9#define __QLA4x_GBL_H
10
11int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
12int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
13int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
14 uint8_t renew_ddb_list);
15int qla4xxx_soft_reset(struct scsi_qla_host *ha);
16irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs);
17
18void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
19void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
20
21int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
22int qla4xxx_relogin_device(struct scsi_qla_host * ha,
23 struct ddb_entry * ddb_entry);
24int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
25 int lun);
26int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
27 uint32_t offset, uint32_t len);
28int qla4xxx_get_firmware_status(struct scsi_qla_host * ha);
29int qla4xxx_get_firmware_state(struct scsi_qla_host * ha);
30int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha);
31
32/* FIXME: Goodness! this really wants a small struct to hold the
33 * parameters. On x86 the args will get passed on the stack! */
34int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
35 uint16_t fw_ddb_index,
36 struct dev_db_entry *fw_ddb_entry,
37 dma_addr_t fw_ddb_entry_dma,
38 uint32_t *num_valid_ddb_entries,
39 uint32_t *next_ddb_index,
40 uint32_t *fw_ddb_device_state,
41 uint32_t *conn_err_detail,
42 uint16_t *tcp_source_port_num,
43 uint16_t *connection_id);
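/*
 * One possible shape for the parameter struct suggested by the FIXME
 * above (an illustrative sketch only; neither this struct nor its
 * field names exist in the driver):
 *
 *	struct qla4xxx_fwddb_query {
 *		uint32_t *num_valid_ddb_entries;
 *		uint32_t *next_ddb_index;
 *		uint32_t *fw_ddb_device_state;
 *		uint32_t *conn_err_detail;
 *		uint16_t *tcp_source_port_num;
 *		uint16_t *connection_id;
 *	};
 *
 * qla4xxx_get_fwddb_entry() could then take a single pointer to such a
 * struct in place of the six separate output arguments.
 */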
44
45struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
46 uint32_t fw_ddb_index);
47int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
48 dma_addr_t fw_ddb_entry_dma);
49
50void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
51 struct ddb_entry *ddb_entry);
52u16 rd_nvram_word(struct scsi_qla_host * ha, int offset);
53void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
54struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
55int qla4xxx_add_sess(struct ddb_entry *);
56void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
57int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
58 uint16_t fw_ddb_index,
59 uint16_t connection_id,
60 uint16_t option);
61int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
62 uint16_t fw_ddb_index);
63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
64int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
65void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
66 uint32_t intr_status);
67int qla4xxx_init_rings(struct scsi_qla_host * ha);
68void qla4xxx_dump_buffer(void *b, uint32_t size);
69struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
70void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
71int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
72int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
73 uint32_t fw_ddb_index, uint32_t state);
74
75extern int extended_error_logging;
76extern int ql4xdiscoverywait;
77extern int ql4xdontresethba;
78#endif /* __QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
new file mode 100644
index 00000000000..bb3a1c11f44
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -0,0 +1,1340 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10/*
11 * QLogic ISP4xxx Hardware Support Function Prototypes.
12 */
13
14static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
15{
16 uint32_t value;
17 uint8_t func_number;
18 unsigned long flags;
19
20 /* Get the function number */
21 spin_lock_irqsave(&ha->hardware_lock, flags);
22 value = readw(&ha->reg->ctrl_status);
23 spin_unlock_irqrestore(&ha->hardware_lock, flags);
24
25 func_number = (uint8_t) ((value >> 4) & 0x30);
26 switch (value & ISP_CONTROL_FN_MASK) {
27 case ISP_CONTROL_FN0_SCSI:
28 ha->mac_index = 1;
29 break;
30 case ISP_CONTROL_FN1_SCSI:
31 ha->mac_index = 3;
32 break;
33 default:
34 DEBUG2(printk("scsi%ld: %s: Invalid function number, "
35 "ispControlStatus = 0x%x\n", ha->host_no,
36 __func__, value));
37 break;
38 }
39 DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
40 ha->mac_index));
41}
42
43/**
44 * qla4xxx_free_ddb - deallocate ddb
45 * @ha: pointer to host adapter structure.
46 * @ddb_entry: pointer to device database entry
47 *
48 * This routine deallocates and unlinks the specified ddb_entry from the
49 * adapter's ddb list.
50 **/
51void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry)
52{
53 /* Remove device entry from list */
54 list_del_init(&ddb_entry->list);
55
56 /* Remove device pointer from index mapping arrays */
57 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
58 (struct ddb_entry *) INVALID_ENTRY;
59 ha->tot_ddbs--;
60
61 /* Free memory and scsi-ml struct for device entry */
62 qla4xxx_destroy_sess(ddb_entry);
63}
64
65/**
66 * qla4xxx_free_ddb_list - deallocate all ddbs
67 * @ha: pointer to host adapter structure.
68 *
69 * This routine deallocates and removes all devices on the specified adapter.
70 **/
71void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
72{
73 struct list_head *ptr;
74 struct ddb_entry *ddb_entry;
75
76 while (!list_empty(&ha->ddb_list)) {
77 ptr = ha->ddb_list.next;
78 /* Free memory for device entry and remove */
79 ddb_entry = list_entry(ptr, struct ddb_entry, list);
80 qla4xxx_free_ddb(ha, ddb_entry);
81 }
82}
83
84/**
85 * qla4xxx_init_rings - initialize hw queues
86 * @ha: pointer to host adapter structure.
87 *
88 * This routine initializes the internal queues for the specified adapter.
89 * The QLA4010 requires us to restart the queues at index 0.
90 * The QLA4000 doesn't care, so just default to QLA4010's requirement.
91 **/
92int qla4xxx_init_rings(struct scsi_qla_host *ha)
93{
94 unsigned long flags = 0;
95
96 /* Initialize request queue. */
97 spin_lock_irqsave(&ha->hardware_lock, flags);
98 ha->request_out = 0;
99 ha->request_in = 0;
100 ha->request_ptr = &ha->request_ring[ha->request_in];
101 ha->req_q_count = REQUEST_QUEUE_DEPTH;
102
103 /* Initialize response queue. */
104 ha->response_in = 0;
105 ha->response_out = 0;
106 ha->response_ptr = &ha->response_ring[ha->response_out];
107
108 /*
109 * Initialize DMA Shadow registers. The firmware is really supposed to
110 * take care of this, but on some uniprocessor systems, the shadow
111 * registers aren't cleared-- causing the interrupt_handler to think
112 * there are responses to be processed when there aren't.
113 */
114 ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
115 ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
116 wmb();
117
118 writel(0, &ha->reg->req_q_in);
119 writel(0, &ha->reg->rsp_q_out);
120 readl(&ha->reg->rsp_q_out);
121
122 spin_unlock_irqrestore(&ha->hardware_lock, flags);
123
124 return QLA_SUCCESS;
125}
126
127/**
128 * qla4xxx_validate_mac_address - validate adapter MAC address(es)
129 * @ha: pointer to host adapter structure.
130 *
131 **/
132static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha)
133{
134 struct flash_sys_info *sys_info;
135 dma_addr_t sys_info_dma;
136 int status = QLA_ERROR;
137
138 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
139 &sys_info_dma, GFP_KERNEL);
140 if (sys_info == NULL) {
141 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
142 ha->host_no, __func__));
143
144 goto exit_validate_mac_no_free;
145 }
146 memset(sys_info, 0, sizeof(*sys_info));
147
148 /* Get flash sys info */
149 if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
150 sizeof(*sys_info)) != QLA_SUCCESS) {
151 DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
152 "failed\n", ha->host_no, __func__));
153
154 goto exit_validate_mac;
155 }
156
157 /* Save M.A.C. address & serial_number */
158 memcpy(ha->my_mac, &sys_info->physAddr[0].address[0],
159 min(sizeof(ha->my_mac),
160 sizeof(sys_info->physAddr[0].address)));
161 memcpy(ha->serial_number, &sys_info->acSerialNumber,
162 min(sizeof(ha->serial_number),
163 sizeof(sys_info->acSerialNumber)));
164
165 status = QLA_SUCCESS;
166
167 exit_validate_mac:
168 dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
169 sys_info_dma);
170
171 exit_validate_mac_no_free:
172 return status;
173}
174
175/**
176 * qla4xxx_init_local_data - initialize adapter specific local data
177 * @ha: pointer to host adapter structure.
178 *
179 **/
180static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
181{
182	/* Initialize AEN queue */
183 ha->aen_q_count = MAX_AEN_ENTRIES;
184
185 return qla4xxx_get_firmware_status(ha);
186}
187
188static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
189{
190 uint32_t timeout_count;
191 int ready = 0;
192
193 DEBUG2(dev_info(&ha->pdev->dev, "Waiting for Firmware Ready..\n"));
194 for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
195 timeout_count--) {
196 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
197 qla4xxx_get_dhcp_ip_address(ha);
198
199 /* Get firmware state. */
200 if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) {
201 DEBUG2(printk("scsi%ld: %s: unable to get firmware "
202 "state\n", ha->host_no, __func__));
203 break;
204
205 }
206
207 if (ha->firmware_state & FW_STATE_ERROR) {
208 DEBUG2(printk("scsi%ld: %s: an unrecoverable error has"
209 " occurred\n", ha->host_no, __func__));
210 break;
211
212 }
213 if (ha->firmware_state & FW_STATE_CONFIG_WAIT) {
214 /*
215 * The firmware has not yet been issued an Initialize
216 * Firmware command, so issue it now.
217 */
218 if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR)
219 break;
220
221 /* Go back and test for ready state - no wait. */
222 continue;
223 }
224
225 if (ha->firmware_state == FW_STATE_READY) {
226 DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n"));
227 /* The firmware is ready to process SCSI commands. */
228 DEBUG2(dev_info(&ha->pdev->dev,
229 "scsi%ld: %s: MEDIA TYPE - %s\n",
230 ha->host_no,
231 __func__, (ha->addl_fw_state &
232 FW_ADDSTATE_OPTICAL_MEDIA)
233 != 0 ? "OPTICAL" : "COPPER"));
234 DEBUG2(dev_info(&ha->pdev->dev,
235 "scsi%ld: %s: DHCP STATE Enabled "
236 "%s\n",
237 ha->host_no, __func__,
238 (ha->addl_fw_state &
239 FW_ADDSTATE_DHCP_ENABLED) != 0 ?
240 "YES" : "NO"));
241 DEBUG2(dev_info(&ha->pdev->dev,
242 "scsi%ld: %s: LINK %s\n",
243 ha->host_no, __func__,
244 (ha->addl_fw_state &
245 FW_ADDSTATE_LINK_UP) != 0 ?
246 "UP" : "DOWN"));
247 DEBUG2(dev_info(&ha->pdev->dev,
248 "scsi%ld: %s: iSNS Service "
249 "Started %s\n",
250 ha->host_no, __func__,
251 (ha->addl_fw_state &
252 FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
253 "YES" : "NO"));
254
255 ready = 1;
256 break;
257 }
258 DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
259 "seconds expired= %d\n", ha->host_no, __func__,
260 ha->firmware_state, ha->addl_fw_state,
261 timeout_count));
262 msleep(1000);
263 } /* end of for */
264
265 if (timeout_count <= 0)
266 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
267 ha->host_no, __func__));
268
269 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) {
270 DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to"
271 " grab an IP address from DHCP server\n",
272 ha->host_no, __func__));
273 ready = 1;
274 }
275
276 return ready;
277}
278
279/**
280 * qla4xxx_init_firmware - initializes the firmware.
281 * @ha: pointer to host adapter structure.
282 *
283 **/
284static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
285{
286 int status = QLA_ERROR;
287
288 dev_info(&ha->pdev->dev, "Initializing firmware..\n");
289 if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
290 DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
291 "control block\n", ha->host_no, __func__));
292 return status;
293 }
294 if (!qla4xxx_fw_ready(ha))
295 return status;
296
297 set_bit(AF_ONLINE, &ha->flags);
298 return qla4xxx_get_firmware_status(ha);
299}
300
301static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
302 uint32_t fw_ddb_index)
303{
304 struct dev_db_entry *fw_ddb_entry = NULL;
305 dma_addr_t fw_ddb_entry_dma;
306 struct ddb_entry *ddb_entry = NULL;
307 int found = 0;
308 uint32_t device_state;
309
310 /* Make sure the dma buffer is valid */
311 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
312 sizeof(*fw_ddb_entry),
313 &fw_ddb_entry_dma, GFP_KERNEL);
314 if (fw_ddb_entry == NULL) {
315 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
316 ha->host_no, __func__));
317 return NULL;
318 }
319
320 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
321 fw_ddb_entry_dma, NULL, NULL,
322 &device_state, NULL, NULL, NULL) ==
323 QLA_ERROR) {
324 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
325 "fw_ddb_index %d\n", ha->host_no, __func__,
326 fw_ddb_index));
327 return NULL;
328 }
329
330 /* Allocate DDB if not already allocated. */
331 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
332 __func__, fw_ddb_index));
333 list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
334 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName,
335 ISCSI_NAME_SIZE) == 0) {
336 found++;
337 break;
338 }
339 }
340
341 if (!found) {
342 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
343 "new ddb\n", ha->host_no, __func__,
344 fw_ddb_index));
345 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
346 }
347
348	/* Free the temporary fw_ddb_entry dma buffer */
349 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
350 fw_ddb_entry_dma);
351
352 return ddb_entry;
353}
354
355/**
356 * qla4xxx_update_ddb_entry - update driver's internal ddb
357 * @ha: pointer to host adapter structure.
358 * @ddb_entry: pointer to device database structure to be filled
359 * @fw_ddb_index: index of the ddb entry in fw ddb table
360 *
361 * This routine updates the driver's internal device database entry
362 * with information retrieved from the firmware's device database
363 * entry for the specified device. The ddb_entry->fw_ddb_index field
364 * must be initialized prior to calling this routine
365 *
366 **/
367int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
368 struct ddb_entry *ddb_entry,
369 uint32_t fw_ddb_index)
370{
371 struct dev_db_entry *fw_ddb_entry = NULL;
372 dma_addr_t fw_ddb_entry_dma;
373 int status = QLA_ERROR;
374
375 if (ddb_entry == NULL) {
376 DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
377 __func__));
378 goto exit_update_ddb;
379 }
380
381 /* Make sure the dma buffer is valid */
382 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
383 sizeof(*fw_ddb_entry),
384 &fw_ddb_entry_dma, GFP_KERNEL);
385 if (fw_ddb_entry == NULL) {
386 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
387 ha->host_no, __func__));
388
389 goto exit_update_ddb;
390 }
391
392 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
393 fw_ddb_entry_dma, NULL, NULL,
394 &ddb_entry->fw_ddb_device_state, NULL,
395 &ddb_entry->tcp_source_port_num,
396 &ddb_entry->connection_id) ==
397 QLA_ERROR) {
398 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
399 "fw_ddb_index %d\n", ha->host_no, __func__,
400 fw_ddb_index));
401
402 goto exit_update_ddb;
403 }
404
405 status = QLA_SUCCESS;
406 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID);
407 ddb_entry->task_mgmt_timeout =
408 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
409 ddb_entry->CmdSn = 0;
410 ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle);
411 ddb_entry->default_relogin_timeout =
412 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
413 ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait);
414
415 /* Update index in case it changed */
416 ddb_entry->fw_ddb_index = fw_ddb_index;
417 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
418
419 ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber);
420 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup);
421 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0],
422 min(sizeof(ddb_entry->iscsi_name),
423 sizeof(fw_ddb_entry->iscsiName)));
424 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0],
425 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr)));
426
427 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
428 ha->host_no, __func__, fw_ddb_index,
429 ddb_entry->fw_ddb_device_state, status));
430
431 exit_update_ddb:
432 if (fw_ddb_entry)
433 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
434 fw_ddb_entry, fw_ddb_entry_dma);
435
436 return status;
437}
438
439/**
440 * qla4xxx_alloc_ddb - allocate device database entry
441 * @ha: Pointer to host adapter structure.
442 * @fw_ddb_index: Firmware's device database index
443 *
444 * This routine allocates a ddb_entry, initializes some values, and
445 * inserts it into the ddb list.
446 **/
447struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
448 uint32_t fw_ddb_index)
449{
450 struct ddb_entry *ddb_entry;
451
452 DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no,
453 __func__, fw_ddb_index));
454
455 ddb_entry = qla4xxx_alloc_sess(ha);
456 if (ddb_entry == NULL) {
457 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
458 "to add fw_ddb_index [%d]\n",
459 ha->host_no, __func__, fw_ddb_index));
460 return ddb_entry;
461 }
462
463 ddb_entry->fw_ddb_index = fw_ddb_index;
464 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
465 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
466 atomic_set(&ddb_entry->relogin_timer, 0);
467 atomic_set(&ddb_entry->relogin_retry_count, 0);
468 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
469 list_add_tail(&ddb_entry->list, &ha->ddb_list);
470 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
471 ha->tot_ddbs++;
472
473 return ddb_entry;
474}
475
476/**
477 * qla4xxx_build_ddb_list - builds driver ddb list
478 * @ha: Pointer to host adapter structure.
479 *
480 * This routine searches for all valid firmware ddb entries and builds
481 * an internal ddb list. Ddbs that are considered valid are those with
482 * a device state of SESSION_ACTIVE.
483 **/
484static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
485{
486 int status = QLA_SUCCESS;
487 uint32_t fw_ddb_index = 0;
488 uint32_t next_fw_ddb_index = 0;
489 uint32_t ddb_state;
490 uint32_t conn_err, err_code;
491 struct ddb_entry *ddb_entry;
492
493 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
494 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
495 fw_ddb_index = next_fw_ddb_index) {
496 /* First, let's see if a device exists here */
497 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL,
498 &next_fw_ddb_index, &ddb_state,
499 &conn_err, NULL, NULL) ==
500 QLA_ERROR) {
501 DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
502 "fw_ddb_index %d failed", ha->host_no,
503 __func__, fw_ddb_index));
504 return QLA_ERROR;
505 }
506
507 DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
508 "next_fw_ddb_index=%d.\n", ha->host_no, __func__,
509 fw_ddb_index, ddb_state, next_fw_ddb_index));
510
511 /* Issue relogin, if necessary. */
512 if (ddb_state == DDB_DS_SESSION_FAILED ||
513 ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) {
514 /* Try and login to device */
515 DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
516 ha->host_no, __func__, fw_ddb_index));
517 err_code = ((conn_err & 0x00ff0000) >> 16);
518 if (err_code == 0x1c || err_code == 0x06) {
519 DEBUG2(printk("scsi%ld: %s send target "
520 "completed "
521 "or access denied failure\n",
522 ha->host_no, __func__));
523 } else
524 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
525 }
526
527 if (ddb_state != DDB_DS_SESSION_ACTIVE)
528 goto next_one;
529 /*
530 * if fw_ddb with session active state found,
531 * add to ddb_list
532 */
533 DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n",
534 ha->host_no, __func__, fw_ddb_index));
535
536		/* Add DDB to our internal ddb list. */
537 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
538 if (ddb_entry == NULL) {
539 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
540 "for device at fw_ddb_index %d\n",
541 ha->host_no, __func__, fw_ddb_index));
542 return QLA_ERROR;
543 }
544 /* Fill in the device structure */
545 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
546 QLA_ERROR) {
547 ha->fw_ddb_index_map[fw_ddb_index] =
548 (struct ddb_entry *)INVALID_ENTRY;
549
550
551 DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
552 "for fw_ddb_index %d.\n",
553 ha->host_no, __func__, fw_ddb_index));
554 return QLA_ERROR;
555 }
556
557next_one:
558 /* We know we've reached the last device when
559 * next_fw_ddb_index is 0 */
560 if (next_fw_ddb_index == 0)
561 break;
562 }
563
564 dev_info(&ha->pdev->dev, "DDB list done..\n");
565
566 return status;
567}
568
569struct qla4_relog_scan {
570 int halt_wait;
571 uint32_t conn_err;
572 uint32_t err_code;
573 uint32_t fw_ddb_index;
574 uint32_t next_fw_ddb_index;
575 uint32_t fw_ddb_device_state;
576};
577
578static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
579{
580 struct ddb_entry *ddb_entry;
581
582 /*
583 * Don't want to do a relogin if connection
584 * error is 0x1c.
585 */
586 rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
587 if (rs->err_code == 0x1c || rs->err_code == 0x06) {
588 DEBUG2(printk(
589 "scsi%ld: %s send target"
590 " completed or "
591 "access denied failure\n",
592 ha->host_no, __func__));
593 } else {
594 /* We either have a device that is in
595 * the process of relogging in or a
596 * device that is waiting to be
597 * relogged in */
598 rs->halt_wait = 0;
599
600 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
601 rs->fw_ddb_index);
602 if (ddb_entry == NULL)
603 return QLA_ERROR;
604
605 if (ddb_entry->dev_scan_wait_to_start_relogin != 0
606 && time_after_eq(jiffies,
607 ddb_entry->
608 dev_scan_wait_to_start_relogin))
609 {
610 ddb_entry->dev_scan_wait_to_start_relogin = 0;
611 qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0);
612 }
613 }
614 return QLA_SUCCESS;
615}
616
617static int qla4_scan_for_relogin(struct scsi_qla_host *ha,
618 struct qla4_relog_scan *rs)
619{
620 int error;
621
622 /* scan for relogins
623 * ----------------- */
624 for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES;
625 rs->fw_ddb_index = rs->next_fw_ddb_index) {
626 if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0,
627 NULL, &rs->next_fw_ddb_index,
628 &rs->fw_ddb_device_state,
629 &rs->conn_err, NULL, NULL)
630 == QLA_ERROR)
631 return QLA_ERROR;
632
633 if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS)
634 rs->halt_wait = 0;
635
636 if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED ||
637 rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) {
638 error = qla4_test_rdy(ha, rs);
639 if (error)
640 return error;
641 }
642
643 /* We know we've reached the last device when
644 * next_fw_ddb_index is 0 */
645 if (rs->next_fw_ddb_index == 0)
646 break;
647 }
648 return QLA_SUCCESS;
649}
650
651/**
652 * qla4xxx_devices_ready - wait for target devices to be logged in
653 * @ha: pointer to adapter structure
654 *
655 * This routine waits up to ql4xdiscoverywait seconds for devices in the
656 * F/W database to complete login during driver load time.
657 **/
658static int qla4xxx_devices_ready(struct scsi_qla_host *ha)
659{
660 int error;
661 unsigned long discovery_wtime;
662 struct qla4_relog_scan rs;
663
664 discovery_wtime = jiffies + (ql4xdiscoverywait * HZ);
665
666 DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait));
667 do {
668 /* poll for AEN. */
669 qla4xxx_get_firmware_state(ha);
670 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) {
671 /* Set time-between-relogin timer */
672 qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS);
673 }
674
675		/* if no relogins active or needed, halt discovery wait */
676 rs.halt_wait = 1;
677
678 error = qla4_scan_for_relogin(ha, &rs);
679
680 if (rs.halt_wait) {
681 DEBUG2(printk("scsi%ld: %s: Delay halted. Devices "
682 "Ready.\n", ha->host_no, __func__));
683 return QLA_SUCCESS;
684 }
685
686 msleep(2000);
687 } while (!time_after_eq(jiffies, discovery_wtime));
688
689 DEBUG3(qla4xxx_get_conn_event_log(ha));
690
691 return QLA_SUCCESS;
692}
693
694static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
695{
696 unsigned long wtime;
697
698 /* Flush the 0x8014 AEN from the firmware as a result of
699 * Auto connect. We are basically doing get_firmware_ddb()
700 * to determine whether we need to log back in or not.
701 * Trying to do a set ddb before we have processed 0x8014
702 * will result in another set_ddb() for the same ddb. In other
703 * words there will be stale entries in the aen_q.
704 */
705 wtime = jiffies + (2 * HZ);
706 do {
707 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
708 if (ha->firmware_state & (BIT_2 | BIT_0))
709 return;
710
711 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
712 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
713
714 msleep(1000);
715 } while (!time_after_eq(jiffies, wtime));
716
717}
718
719static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
720{
721 uint16_t fw_ddb_index;
722 int status = QLA_SUCCESS;
723
724	/* free the ddb list if it is not empty */
725 if (!list_empty(&ha->ddb_list))
726 qla4xxx_free_ddb_list(ha);
727
728 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
729 ha->fw_ddb_index_map[fw_ddb_index] =
730 (struct ddb_entry *)INVALID_ENTRY;
731
732 ha->tot_ddbs = 0;
733
734 qla4xxx_flush_AENS(ha);
735
736 /*
737 * First perform device discovery for active
738 * fw ddb indexes and build
739 * ddb list.
740 */
741 if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
742 return status;
743
744 /* Wait for an AEN */
745 qla4xxx_devices_ready(ha);
746
747 /*
748	 * Targets can come online after the initial discovery, so processing
749	 * the AENs here will catch them.
750 */
751 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
752 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
753
754 return status;
755}
756
757/**
758 * qla4xxx_reinitialize_ddb_list - update the driver ddb list
759 * @ha: pointer to host adapter structure.
760 *
761 * This routine obtains device information from the F/W database after
762 * firmware or adapter resets. The device table is preserved.
763 **/
764int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
765{
766 int status = QLA_SUCCESS;
767 struct ddb_entry *ddb_entry, *detemp;
768
769 /* Update the device information for all devices. */
770 list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) {
771 qla4xxx_update_ddb_entry(ha, ddb_entry,
772 ddb_entry->fw_ddb_index);
773 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
774 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
775 DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked "
776 "ONLINE\n", ha->host_no, __func__,
777 ddb_entry->fw_ddb_index));
778 } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
779 qla4xxx_mark_device_missing(ha, ddb_entry);
780 }
781 return status;
782}
783
784/**
785 * qla4xxx_relogin_device - re-establish session
786 * @ha: Pointer to host adapter structure.
787 * @ddb_entry: Pointer to device database entry
788 *
789 * This routine does a session relogin with the specified device.
790 * The ddb entry must be assigned prior to making this call.
791 **/
792int qla4xxx_relogin_device(struct scsi_qla_host *ha,
793 struct ddb_entry * ddb_entry)
794{
795 uint16_t relogin_timer;
796
797 relogin_timer = max(ddb_entry->default_relogin_timeout,
798 (uint16_t)RELOGIN_TOV);
799 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
800
801 DEBUG2(printk("scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
802 ddb_entry->fw_ddb_index, relogin_timer));
803
804 qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
805
806 return QLA_SUCCESS;
807}
808
809/**
810 * qla4010_get_topcat_presence - check whether the QLA4040 TopCat chip is present
811 * @ha: Pointer to host adapter structure.
812 *
813 **/
814static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
815{
816 unsigned long flags;
817 uint16_t topcat;
818
819 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
820 return QLA_ERROR;
821 spin_lock_irqsave(&ha->hardware_lock, flags);
822 topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
823 isp4010.topcat));
824 spin_unlock_irqrestore(&ha->hardware_lock, flags);
825
826 if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
827 set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
828 else
829 clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
830 ql4xxx_unlock_nvram(ha);
831 return QLA_SUCCESS;
832}
833
834
835static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
836{
837 unsigned long flags;
838 union external_hw_config_reg extHwConfig;
839
840 DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
841 __func__));
842 if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
843 return (QLA_ERROR);
844 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
845 ql4xxx_unlock_flash(ha);
846 return (QLA_ERROR);
847 }
848
849 /* Get EEPRom Parameters from NVRAM and validate */
850 dev_info(&ha->pdev->dev, "Configuring NVRAM ...\n");
851 if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
852 spin_lock_irqsave(&ha->hardware_lock, flags);
853 extHwConfig.Asuint32_t =
854 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
856 } else {
857 /*
858 * QLogic adapters should always have a valid NVRAM.
859 * If not valid, do not load.
860 */
861 dev_warn(&ha->pdev->dev,
862 "scsi%ld: %s: EEProm checksum invalid. "
863 "Please update your EEPROM\n", ha->host_no,
864 __func__);
865
866 /* set defaults */
867 if (is_qla4010(ha))
868 extHwConfig.Asuint32_t = 0x1912;
869 else if (is_qla4022(ha))
870 extHwConfig.Asuint32_t = 0x0023;
871 }
872 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
873 ha->host_no, __func__, extHwConfig.Asuint32_t));
874
875 spin_lock_irqsave(&ha->hardware_lock, flags);
876 writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha));
877 readl(isp_ext_hw_conf(ha));
878 spin_unlock_irqrestore(&ha->hardware_lock, flags);
879
880 ql4xxx_unlock_nvram(ha);
881 ql4xxx_unlock_flash(ha);
882
883 return (QLA_SUCCESS);
884}
885
886static void qla4x00_pci_config(struct scsi_qla_host *ha)
887{
888 uint16_t w, mwi;
889
890 dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
891
892 pci_set_master(ha->pdev);
893 mwi = 0;
894 if (pci_set_mwi(ha->pdev))
895 mwi = PCI_COMMAND_INVALIDATE;
896 /*
897 * We want to respect framework's setting of PCI configuration space
898 * command register and also want to make sure that all bits of
899 * interest to us are properly set in command register.
900 */
901 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
902 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
903 w &= ~PCI_COMMAND_INTX_DISABLE;
904 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
905}
906
907static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
908{
909 int status = QLA_ERROR;
910 uint32_t max_wait_time;
911 unsigned long flags;
912 uint32_t mbox_status;
913
914 dev_info(&ha->pdev->dev, "Starting firmware ...\n");
915
916 /*
917 * Start firmware from flash ROM
918 *
919 * WORKAROUND: Stuff a non-constant value that the firmware can
920 * use as a seed for a random number generator in MB7 prior to
921 * setting BOOT_ENABLE. Fixes problem where the TCP
922 * connections use the same TCP ports after each reboot,
923 * causing some connections to not get re-established.
924 */
925 DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n",
926 ha->host_no, __func__));
927
928 spin_lock_irqsave(&ha->hardware_lock, flags);
929 writel(jiffies, &ha->reg->mailbox[7]);
930 if (is_qla4022(ha))
931 writel(set_rmask(NVR_WRITE_ENABLE),
932 &ha->reg->u1.isp4022.nvram);
933
934 writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
935 readl(&ha->reg->ctrl_status);
936 spin_unlock_irqrestore(&ha->hardware_lock, flags);
937
938 /* Wait for firmware to come UP. */
939 max_wait_time = FIRMWARE_UP_TOV * 4;
940 do {
941 uint32_t ctrl_status;
942
943 spin_lock_irqsave(&ha->hardware_lock, flags);
944 ctrl_status = readw(&ha->reg->ctrl_status);
945 mbox_status = readw(&ha->reg->mailbox[0]);
946 spin_unlock_irqrestore(&ha->hardware_lock, flags);
947
948 if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR))
949 break;
950 if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
951 break;
952
953 DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to "
954 "complete... ctrl_sts=0x%x, remaining=%d\n",
955 ha->host_no, __func__, ctrl_status,
956 max_wait_time));
957
958 msleep(250);
959 } while ((max_wait_time--));
960
961 if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
962 DEBUG(printk("scsi%ld: %s: Firmware has started\n",
963 ha->host_no, __func__));
964
965 spin_lock_irqsave(&ha->hardware_lock, flags);
966 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
967 &ha->reg->ctrl_status);
968 readl(&ha->reg->ctrl_status);
969 spin_unlock_irqrestore(&ha->hardware_lock, flags);
970
971 status = QLA_SUCCESS;
972 } else {
973 printk(KERN_INFO "scsi%ld: %s: Boot firmware failed "
974 "- mbox status 0x%x\n", ha->host_no, __func__,
975 mbox_status);
976 status = QLA_ERROR;
977 }
978 return status;
979}
980
981static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
982{
983#define QL4_LOCK_DRVR_WAIT 300
984#define QL4_LOCK_DRVR_SLEEP 100
985
986 int drvr_wait = QL4_LOCK_DRVR_WAIT;
987 while (drvr_wait) {
988 if (ql4xxx_lock_drvr(a) == 0) {
989 msleep(QL4_LOCK_DRVR_SLEEP);
990 if (drvr_wait) {
991 DEBUG2(printk("scsi%ld: %s: Waiting for "
992 "Global Init Semaphore...n",
993 a->host_no,
994 __func__));
995 }
996 drvr_wait -= QL4_LOCK_DRVR_SLEEP;
997 } else {
998 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
999 "acquired.n", a->host_no, __func__));
1000 return QLA_SUCCESS;
1001 }
1002 }
1003 return QLA_ERROR;
1004}
1005
1006/**
1007 * qla4xxx_start_firmware - starts qla4xxx firmware
1008 * @ha: Pointer to host adapter structure.
1009 *
1010 * This routine performs the necessary steps to start the firmware for
1011 * the QLA4010 adapter.
1012 **/
1013static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1014{
1015 unsigned long flags = 0;
1016 uint32_t mbox_status;
1017 int status = QLA_ERROR;
1018 int soft_reset = 1;
1019 int config_chip = 0;
1020
1021 if (is_qla4010(ha)){
1022 if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
1023 return QLA_ERROR;
1024 }
1025
1026 if (is_qla4022(ha))
1027 ql4xxx_set_mac_number(ha);
1028
1029 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1030 return QLA_ERROR;
1031
1032 spin_lock_irqsave(&ha->hardware_lock, flags);
1033
1034 DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no,
1035 __func__, readw(isp_port_ctrl(ha))));
1036 DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no,
1037 __func__, readw(isp_port_status(ha))));
1038
1039 /* Is Hardware already initialized? */
1040 if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) {
1041 DEBUG(printk("scsi%ld: %s: Hardware has already been "
1042 "initialized\n", ha->host_no, __func__));
1043
1044 /* Receive firmware boot acknowledgement */
1045 mbox_status = readw(&ha->reg->mailbox[0]);
1046
1047 DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= "
1048 "0x%x\n", ha->host_no, __func__, mbox_status));
1049
1050 /* Is firmware already booted? */
1051 if (mbox_status == 0) {
1052 /* F/W not running, must be config by net driver */
1053 config_chip = 1;
1054 soft_reset = 0;
1055 } else {
1056 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
1057 &ha->reg->ctrl_status);
1058 readl(&ha->reg->ctrl_status);
1059 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1060 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
1061 DEBUG2(printk("scsi%ld: %s: Get firmware "
1062 "state -- state = 0x%x\n",
1063 ha->host_no,
1064 __func__, ha->firmware_state));
1065 /* F/W is running */
1066 if (ha->firmware_state &
1067 FW_STATE_CONFIG_WAIT) {
1068 DEBUG2(printk("scsi%ld: %s: Firmware "
1069 "in known state -- "
1070 "config and "
1071 "boot, state = 0x%x\n",
1072 ha->host_no, __func__,
1073 ha->firmware_state));
1074 config_chip = 1;
1075 soft_reset = 0;
1076 }
1077 } else {
1078 DEBUG2(printk("scsi%ld: %s: Firmware in "
1079 "unknown state -- resetting,"
1080 " state = "
1081 "0x%x\n", ha->host_no, __func__,
1082 ha->firmware_state));
1083 }
1084 spin_lock_irqsave(&ha->hardware_lock, flags);
1085 }
1086 } else {
1087 DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been "
1088 "started - resetting\n", ha->host_no, __func__));
1089 }
1090 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1091
1092 DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ",
1093 ha->host_no, __func__, soft_reset, config_chip));
1094 if (soft_reset) {
1095 DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
1096 __func__));
1097 status = qla4xxx_soft_reset(ha);
1098 if (status == QLA_ERROR) {
1099 DEBUG(printk("scsi%d: %s: Soft Reset failed!\n",
1100 ha->host_no, __func__));
1101 ql4xxx_unlock_drvr(ha);
1102 return QLA_ERROR;
1103 }
1104 config_chip = 1;
1105
1106		/* Reset clears the semaphore, so acquire again */
1107 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1108 return QLA_ERROR;
1109 }
1110
1111 if (config_chip) {
1112 if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS)
1113 status = qla4xxx_start_firmware_from_flash(ha);
1114 }
1115
1116 ql4xxx_unlock_drvr(ha);
1117 if (status == QLA_SUCCESS) {
1118 qla4xxx_get_fw_version(ha);
1119 if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
1120 qla4xxx_get_crash_record(ha);
1121 } else {
1122 DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
1123 ha->host_no, __func__));
1124 }
1125 return status;
1126}
1127
1128
1129/**
1130 * qla4xxx_initialize_adapter - initializes hba
1131 * @ha: Pointer to host adapter structure.
1132 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
1133 * after adapter recovery has completed.
1134 * 0=preserve ddb list, 1=destroy and rebuild ddb list
1135 *
1136 * This routine performs all of the steps necessary to initialize the adapter.
1137 *
1138 **/
1139int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1140 uint8_t renew_ddb_list)
1141{
1142 int status = QLA_ERROR;
1143 int8_t ip_address[IP_ADDR_LEN] = {0} ;
1144
1145 ha->eeprom_cmd_data = 0;
1146
1147 qla4x00_pci_config(ha);
1148
1149 qla4xxx_disable_intrs(ha);
1150
1151 /* Initialize the Host adapter request/response queues and firmware */
1152 if (qla4xxx_start_firmware(ha) == QLA_ERROR)
1153 return status;
1154
1155 if (qla4xxx_validate_mac_address(ha) == QLA_ERROR)
1156 return status;
1157
1158 if (qla4xxx_init_local_data(ha) == QLA_ERROR)
1159 return status;
1160
1161 status = qla4xxx_init_firmware(ha);
1162 if (status == QLA_ERROR)
1163 return status;
1164
1165 /*
1166 * FW is waiting to get an IP address from DHCP server: Skip building
1167 * the ddb_list and wait for DHCP lease acquired aen to come in
1168	 * followed by a 0x8014 aen to trigger the target discovery process.
1169 */
1170 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
1171 return status;
1172
1173	/* Skip device discovery if the IP address or subnet mask is zero */
1174 if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
1175 memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
1176 return status;
1177
1178 if (renew_ddb_list == PRESERVE_DDB_LIST) {
1179 /*
1180 * We want to preserve lun states (i.e. suspended, etc.)
1181 * for recovery initiated by the driver. So just update
1182 * the device states for the existing ddb_list.
1183 */
1184 qla4xxx_reinitialize_ddb_list(ha);
1185 } else if (renew_ddb_list == REBUILD_DDB_LIST) {
1186 /*
1187 * We want to build the ddb_list from scratch during
1188 * driver initialization and recovery initiated by the
1189 * INT_HBA_RESET IOCTL.
1190 */
1191 status = qla4xxx_initialize_ddb_list(ha);
1192 if (status == QLA_ERROR) {
1193 DEBUG2(printk("%s(%ld) Error occurred during build"
1194 "ddb list\n", __func__, ha->host_no));
1195 goto exit_init_hba;
1196 }
1197
1198 }
1199 if (!ha->tot_ddbs) {
1200 DEBUG2(printk("scsi%ld: Failed to initialize devices or none "
1201 "present in Firmware device database\n",
1202 ha->host_no));
1203 }
1204
1205 exit_init_hba:
1206 return status;
1207
1208}
1209
1210/**
1211 * qla4xxx_add_device_dynamically - ddb addition due to an AEN
1212 * @ha: Pointer to host adapter structure.
1213 * @fw_ddb_index: Firmware's device database index
1214 *
1215 * This routine adds a device as a result of an 8014h AEN.
1216 **/
1217static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1218 uint32_t fw_ddb_index)
1219{
1220 struct ddb_entry * ddb_entry;
1221
1222 /* First allocate a device structure */
1223 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
1224 if (ddb_entry == NULL) {
1225 DEBUG2(printk(KERN_WARNING
1226 "scsi%ld: Unable to allocate memory to add "
1227 "fw_ddb_index %d\n", ha->host_no, fw_ddb_index));
1228 return;
1229 }
1230
1231 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
1232 QLA_ERROR) {
1233 ha->fw_ddb_index_map[fw_ddb_index] =
1234 (struct ddb_entry *)INVALID_ENTRY;
1235 DEBUG2(printk(KERN_WARNING
1236 "scsi%ld: failed to add new device at index "
1237 "[%d]\n Unable to retrieve fw ddb entry\n",
1238 ha->host_no, fw_ddb_index));
1239 qla4xxx_free_ddb(ha, ddb_entry);
1240 return;
1241 }
1242
1243 if (qla4xxx_add_sess(ddb_entry)) {
1244 DEBUG2(printk(KERN_WARNING
1245 "scsi%ld: failed to add new device at index "
1246 "[%d]\n Unable to add connection and session\n",
1247 ha->host_no, fw_ddb_index));
1248 qla4xxx_free_ddb(ha, ddb_entry);
1249 }
1250}
1251
1252/**
1253 * qla4xxx_process_ddb_changed - process ddb state change
1254 * @ha: Pointer to host adapter structure.
1255 * @fw_ddb_index: Firmware's device database index
1256 * @state: Device state
1257 *
1258 * This routine processes a Device Database Changed AEN Event.
1259 **/
1260int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1261 uint32_t fw_ddb_index, uint32_t state)
1262{
1263 struct ddb_entry * ddb_entry;
1264 uint32_t old_fw_ddb_device_state;
1265
1266 /* check for out of range index */
1267 if (fw_ddb_index >= MAX_DDB_ENTRIES)
1268 return QLA_ERROR;
1269
1270	/* Get the corresponding ddb entry */
1271 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
1272 /* Device does not currently exist in our database. */
1273 if (ddb_entry == NULL) {
1274 if (state == DDB_DS_SESSION_ACTIVE)
1275 qla4xxx_add_device_dynamically(ha, fw_ddb_index);
1276 return QLA_SUCCESS;
1277 }
1278
1279 /* Device already exists in our database. */
1280 old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
1281 DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
1282 "index [%d]\n", ha->host_no, __func__,
1283 ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
1284 if (old_fw_ddb_device_state == state &&
1285 state == DDB_DS_SESSION_ACTIVE) {
1286 /* Do nothing, state not changed. */
1287 return QLA_SUCCESS;
1288 }
1289
1290 ddb_entry->fw_ddb_device_state = state;
1291 /* Device is back online. */
1292 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
1293 atomic_set(&ddb_entry->port_down_timer,
1294 ha->port_down_retry_count);
1295 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1296 atomic_set(&ddb_entry->relogin_retry_count, 0);
1297 atomic_set(&ddb_entry->relogin_timer, 0);
1298 clear_bit(DF_RELOGIN, &ddb_entry->flags);
1299 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
1300 iscsi_if_create_session_done(ddb_entry->conn);
1301 /*
1302		 * Change the lun state to READY in case the lun timed out before
1303 * the device came back.
1304 */
1305 } else {
1306 /* Device went away, try to relogin. */
1307 /* Mark device missing */
1308 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
1309 qla4xxx_mark_device_missing(ha, ddb_entry);
1310 /*
1311		 * Relogin if the device state changed to an inactive state.
1312 * However, do not relogin if this aen is a result of an IOCTL
1313 * logout (DF_NO_RELOGIN) or if this is a discovered device.
1314 */
1315 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
1316 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
1317 !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
1318 !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) {
1319 /*
1320 * This triggers a relogin. After the relogin_timer
1321 * expires, the relogin gets scheduled. We must wait a
1322 * minimum amount of time since receiving an 0x8014 AEN
1323 * with failed device_state or a logout response before
1324 * we can issue another relogin.
1325 */
1326			/* Firmware pads this timeout: (time2wait + 1).
1327			 * The driver's relogin retry should be longer than the
1328			 * F/W's; otherwise the F/W will fail the
1329			 * set_ddb() mbx cmd with 0x4005 since it is still
1330			 * counting down its time2wait.
1331 */
1332 atomic_set(&ddb_entry->relogin_timer, 0);
1333 atomic_set(&ddb_entry->retry_relogin_timer,
1334 ddb_entry->default_time2wait + 4);
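			/*
			 * Example: with a reported default_time2wait of 2,
			 * the firmware holds off for roughly 3 (time2wait + 1)
			 * while the driver schedules its relogin after 6
			 * (time2wait + 4), keeping the driver's retry safely
			 * behind the firmware's countdown.
			 */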
1335 }
1336 }
1337
1338 return QLA_SUCCESS;
1339}
1340
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
new file mode 100644
index 00000000000..0d61797af7d
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -0,0 +1,84 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8/*
9 *
10 * qla4xxx_lookup_ddb_by_fw_index
11 * This routine locates a device handle given the firmware device
12 * database index. If device doesn't exist, returns NULL.
13 *
14 * Input:
15 * ha - Pointer to host adapter structure.
16 * fw_ddb_index - Firmware's device database index
17 *
18 * Returns:
19 * Pointer to the corresponding internal device database structure
20 */
21static inline struct ddb_entry *
22qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
23{
24 struct ddb_entry *ddb_entry = NULL;
25
26 if ((fw_ddb_index < MAX_DDB_ENTRIES) &&
27 (ha->fw_ddb_index_map[fw_ddb_index] !=
28 (struct ddb_entry *) INVALID_ENTRY)) {
29 ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
30 }
31
32 DEBUG3(printk("scsi%d: %s: index [%d], ddb_entry = %p\n",
33 ha->host_no, __func__, fw_ddb_index, ddb_entry));
34
35 return ddb_entry;
36}
37
38static inline void
39__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
40{
41 if (is_qla4022(ha)) {
42 writel(set_rmask(IMR_SCSI_INTR_ENABLE),
43 &ha->reg->u1.isp4022.intr_mask);
44 readl(&ha->reg->u1.isp4022.intr_mask);
45 } else {
46 writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
47 readl(&ha->reg->ctrl_status);
48 }
49 set_bit(AF_INTERRUPTS_ON, &ha->flags);
50}
51
52static inline void
53__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
54{
55 if (is_qla4022(ha)) {
56 writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
57 &ha->reg->u1.isp4022.intr_mask);
58 readl(&ha->reg->u1.isp4022.intr_mask);
59 } else {
60 writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
61 readl(&ha->reg->ctrl_status);
62 }
63 clear_bit(AF_INTERRUPTS_ON, &ha->flags);
64}
65
66static inline void
67qla4xxx_enable_intrs(struct scsi_qla_host *ha)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(&ha->hardware_lock, flags);
72 __qla4xxx_enable_intrs(ha);
73 spin_unlock_irqrestore(&ha->hardware_lock, flags);
74}
75
76static inline void
77qla4xxx_disable_intrs(struct scsi_qla_host *ha)
78{
79 unsigned long flags;
80
81 spin_lock_irqsave(&ha->hardware_lock, flags);
82 __qla4xxx_disable_intrs(ha);
83 spin_unlock_irqrestore(&ha->hardware_lock, flags);
84}
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
new file mode 100644
index 00000000000..c0a254b89a3
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -0,0 +1,368 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10#include <scsi/scsi_tcq.h>
11
12/**
13 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
14 * @ha: Pointer to host adapter structure.
15 * @queue_entry: Pointer to pointer to queue entry structure
16 *
17 * This routine performs the following tasks:
18 * - returns the current request_in pointer (if queue not full)
19 * - advances the request_in pointer
20 * - checks for queue full
21 **/
22int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
23 struct queue_entry **queue_entry)
24{
25 uint16_t request_in;
26 uint8_t status = QLA_SUCCESS;
27
28 *queue_entry = ha->request_ptr;
29
30 /* get the latest request_in and request_out index */
31 request_in = ha->request_in;
32 ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
33
34 /* Advance request queue pointer and check for queue full */
35 if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
36 request_in = 0;
37 ha->request_ptr = ha->request_ring;
38 } else {
39 request_in++;
40 ha->request_ptr++;
41 }
42
43 /* request queue is full, try again later */
44 if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
45 /* restore request pointer */
46 ha->request_ptr = *queue_entry;
47 status = QLA_ERROR;
48 } else {
49 ha->request_in = request_in;
50 memset(*queue_entry, 0, sizeof(**queue_entry));
51 }
52
53 return status;
54}
55
56/**
57 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
58 * @ha: Pointer to host adapter structure.
59 * @ddb_entry: Pointer to device database entry
60 * @lun: SCSI LUN
62 *
63 * This routine issues a marker IOCB.
64 **/
65int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
66 struct ddb_entry *ddb_entry, int lun)
67{
68 struct marker_entry *marker_entry;
69 unsigned long flags = 0;
70 uint8_t status = QLA_SUCCESS;
71
72 /* Acquire hardware specific lock */
73 spin_lock_irqsave(&ha->hardware_lock, flags);
74
75 /* Get pointer to the queue entry for the marker */
76 if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
77 QLA_SUCCESS) {
78 status = QLA_ERROR;
79 goto exit_send_marker;
80 }
81
82 /* Put the marker in the request queue */
83 marker_entry->hdr.entryType = ET_MARKER;
84 marker_entry->hdr.entryCount = 1;
85 marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
86 marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
87 int_to_scsilun(lun, &marker_entry->lun);
88 wmb();
89
90 /* Tell ISP it's got a new I/O request */
91 writel(ha->request_in, &ha->reg->req_q_in);
92 readl(&ha->reg->req_q_in);
93
94exit_send_marker:
95 spin_unlock_irqrestore(&ha->hardware_lock, flags);
96 return status;
97}
98
99struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
100 struct scsi_qla_host *ha)
101{
102 struct continuation_t1_entry *cont_entry;
103
104 cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
105
106 /* Advance request queue pointer */
107 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
108 ha->request_in = 0;
109 ha->request_ptr = ha->request_ring;
110 } else {
111 ha->request_in++;
112 ha->request_ptr++;
113 }
114
115 /* Load packet defaults */
116 cont_entry->hdr.entryType = ET_CONTINUE;
117 cont_entry->hdr.entryCount = 1;
118 cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
119
120 return cont_entry;
121}
122
123uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
124{
125 uint16_t iocbs;
126
127 iocbs = 1;
128 if (dsds > COMMAND_SEG) {
129 iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
130 if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
131 iocbs++;
132 }
133 return iocbs;
134}
135
136void qla4xxx_build_scsi_iocbs(struct srb *srb,
137 struct command_t3_entry *cmd_entry,
138 uint16_t tot_dsds)
139{
140 struct scsi_qla_host *ha;
141 uint16_t avail_dsds;
142 struct data_seg_a64 *cur_dsd;
143 struct scsi_cmnd *cmd;
144
145 cmd = srb->cmd;
146 ha = srb->ha;
147
148 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
149 /* No data being transferred */
150 cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
151 return;
152 }
153
154 avail_dsds = COMMAND_SEG;
155 cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
156
157 /* Load data segments */
158 if (cmd->use_sg) {
159 struct scatterlist *cur_seg;
160 struct scatterlist *end_seg;
161
162 cur_seg = (struct scatterlist *)cmd->request_buffer;
163 end_seg = cur_seg + tot_dsds;
164 while (cur_seg < end_seg) {
165 dma_addr_t sle_dma;
166
167 /* Allocate additional continuation packets? */
168 if (avail_dsds == 0) {
169 struct continuation_t1_entry *cont_entry;
170
171 cont_entry = qla4xxx_alloc_cont_entry(ha);
172 cur_dsd =
173 (struct data_seg_a64 *)
174 &cont_entry->dataseg[0];
175 avail_dsds = CONTINUE_SEG;
176 }
177
178 sle_dma = sg_dma_address(cur_seg);
179 cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
180 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
181 cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
182 avail_dsds--;
183
184 cur_dsd++;
185 cur_seg++;
186 }
187 } else {
188 cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
189 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
190 cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
191 }
192}
193
194/**
195 * qla4xxx_send_command_to_isp - issues command to HBA
196 * @ha: pointer to host adapter structure.
197 * @srb: pointer to SCSI Request Block to be sent to ISP
198 *
199 * This routine is called by qla4xxx_queuecommand to build an ISP
200 * command and pass it to the ISP for execution.
201 **/
202int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
203{
204 struct scsi_cmnd *cmd = srb->cmd;
205 struct ddb_entry *ddb_entry;
206 struct command_t3_entry *cmd_entry;
207 struct scatterlist *sg = NULL;
208
209 uint16_t tot_dsds;
210 uint16_t req_cnt;
211
212 unsigned long flags;
213 uint16_t cnt;
214 uint32_t index;
215 char tag[2];
216
218	/* Get the device database entry for this command */
218 ddb_entry = srb->ddb;
219
220 /* Send marker(s) if needed. */
221 if (ha->marker_needed == 1) {
222 if (qla4xxx_send_marker_iocb(ha, ddb_entry,
223 cmd->device->lun) != QLA_SUCCESS)
224 return QLA_ERROR;
225
226 ha->marker_needed = 0;
227 }
228 tot_dsds = 0;
229
230 /* Acquire hardware specific lock */
231 spin_lock_irqsave(&ha->hardware_lock, flags);
232
233 index = (uint32_t)cmd->request->tag;
234
235 /* Calculate the number of request entries needed. */
236 if (cmd->use_sg) {
237 sg = (struct scatterlist *)cmd->request_buffer;
238 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
239 cmd->sc_data_direction);
240 if (tot_dsds == 0)
241 goto queuing_error;
242 } else if (cmd->request_bufflen) {
243 dma_addr_t req_dma;
244
245 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
246 cmd->request_bufflen,
247 cmd->sc_data_direction);
248 if (dma_mapping_error(req_dma))
249 goto queuing_error;
250
251 srb->dma_handle = req_dma;
252 tot_dsds = 1;
253 }
254 req_cnt = qla4xxx_calc_request_entries(tot_dsds);
255
256 if (ha->req_q_count < (req_cnt + 2)) {
257 cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
258 if (ha->request_in < cnt)
259 ha->req_q_count = cnt - ha->request_in;
260 else
261 ha->req_q_count = REQUEST_QUEUE_DEPTH -
262 (ha->request_in - cnt);
263 }
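	/*
	 * Free-slot arithmetic above, illustrated assuming a
	 * REQUEST_QUEUE_DEPTH of 64: if request_in is 10 and the
	 * firmware's req_q_out shadow reads 50, then 50 - 10 = 40
	 * entries are free; if request_in is 50 and req_q_out is 10,
	 * then 64 - (50 - 10) = 24 entries are free.
	 */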
264
265 if (ha->req_q_count < (req_cnt + 2))
266 goto queuing_error;
267
268 /* total iocbs active */
269 if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
270 goto queuing_error;
271
272 /* Build command packet */
273 cmd_entry = (struct command_t3_entry *) ha->request_ptr;
274 memset(cmd_entry, 0, sizeof(struct command_t3_entry));
275 cmd_entry->hdr.entryType = ET_COMMAND;
276 cmd_entry->handle = cpu_to_le32(index);
277 cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
278 cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);
279
280 int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
281 cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
282 cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
283 memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
284 cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
285 cmd_entry->hdr.entryCount = req_cnt;
286
287 /* Set data transfer direction control flags
288 * NOTE: Look at data_direction bits iff there is data to be
289	 * transferred, as the data direction bit is sometimes filled
290 * in when there is no data to be transferred */
291 cmd_entry->control_flags = CF_NO_DATA;
292 if (cmd->request_bufflen) {
293 if (cmd->sc_data_direction == DMA_TO_DEVICE)
294 cmd_entry->control_flags = CF_WRITE;
295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
296 cmd_entry->control_flags = CF_READ;
297 }
298
299 /* Set tagged queueing control flags */
300 cmd_entry->control_flags |= CF_SIMPLE_TAG;
301 if (scsi_populate_tag_msg(cmd, tag))
302 switch (tag[0]) {
303 case MSG_HEAD_TAG:
304 cmd_entry->control_flags |= CF_HEAD_TAG;
305 break;
306 case MSG_ORDERED_TAG:
307 cmd_entry->control_flags |= CF_ORDERED_TAG;
308 break;
309 }
310
311
312 /* Advance request queue pointer */
313 ha->request_in++;
314 if (ha->request_in == REQUEST_QUEUE_DEPTH) {
315 ha->request_in = 0;
316 ha->request_ptr = ha->request_ring;
317 } else
318 ha->request_ptr++;
319
320
321 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
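	/* Make sure the IOCB and its data-segment descriptors are visible in
	 * memory before the request-queue doorbell is rung below. */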
322 wmb();
323
324 /*
325 * Check to see if adapter is online before placing request on
326 * request queue. If a reset occurs and a request is in the queue,
327 * the firmware will still attempt to process the request, retrieving
328 * garbage for pointers.
329 */
330 if (!test_bit(AF_ONLINE, &ha->flags)) {
331 DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
332 "Do not issue command.\n",
333 ha->host_no, __func__));
334 goto queuing_error;
335 }
336
337 srb->cmd->host_scribble = (unsigned char *)srb;
338
339 /* update counters */
340 srb->state = SRB_ACTIVE_STATE;
341 srb->flags |= SRB_DMA_VALID;
342
343 /* Track IOCB used */
344 ha->iocb_cnt += req_cnt;
345 srb->iocb_cnt = req_cnt;
346 ha->req_q_count -= req_cnt;
347
348	/* Ring the request-queue doorbell to notify the ISP of the new request */
349 writel(ha->request_in, &ha->reg->req_q_in);
350 readl(&ha->reg->req_q_in);
351 spin_unlock_irqrestore(&ha->hardware_lock, flags);
352
353 return QLA_SUCCESS;
354
355queuing_error:
356
357 if (cmd->use_sg && tot_dsds) {
358 sg = (struct scatterlist *) cmd->request_buffer;
359 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
360 cmd->sc_data_direction);
361 } else if (tot_dsds)
362 pci_unmap_single(ha->pdev, srb->dma_handle,
363 cmd->request_bufflen, cmd->sc_data_direction);
364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
365
366 return QLA_ERROR;
367}
368
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
new file mode 100644
index 00000000000..b584317608d
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -0,0 +1,797 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10/**
11 * qla4xxx_process_completed_request() - Process a Fast Post response.
12 * @ha: SCSI driver HA context
13 * @index: SRB index
14 **/
15static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
16 uint32_t index)
17{
18 struct srb *srb;
19
20 srb = qla4xxx_del_from_active_array(ha, index);
21 if (srb) {
22 /* Save ISP completion status */
23 srb->cmd->result = DID_OK << 16;
24 qla4xxx_srb_compl(ha, srb);
25 } else {
26 DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
27 "%d\n", ha->host_no, index));
28 set_bit(DPC_RESET_HA, &ha->dpc_flags);
29 }
30}
31
32/**
33 * qla4xxx_status_entry - processes status IOCBs
34 * @ha: Pointer to host adapter structure.
35 * @sts_entry: Pointer to status entry structure.
36 **/
37static void qla4xxx_status_entry(struct scsi_qla_host *ha,
38 struct status_entry *sts_entry)
39{
40 uint8_t scsi_status;
41 struct scsi_cmnd *cmd;
42 struct srb *srb;
43 struct ddb_entry *ddb_entry;
44 uint32_t residual;
45 uint16_t sensebytecnt;
46
47 if (sts_entry->completionStatus == SCS_COMPLETE &&
48 sts_entry->scsiStatus == 0) {
49 qla4xxx_process_completed_request(ha,
50 le32_to_cpu(sts_entry->
51 handle));
52 return;
53 }
54
55 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
56 if (!srb) {
57 /* FIXMEdg: Don't we need to reset ISP in this case??? */
58 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
59 "handle 0x%x, sp=%p. This cmd may have already "
60 "been completed.\n", ha->host_no, __func__,
61 le32_to_cpu(sts_entry->handle), srb));
62 return;
63 }
64
65 cmd = srb->cmd;
66 if (cmd == NULL) {
67 DEBUG2(printk("scsi%ld: %s: Command already returned back to "
68 "OS pkt->handle=%d srb=%p srb->state:%d\n",
69 ha->host_no, __func__, sts_entry->handle,
70 srb, srb->state));
71 dev_warn(&ha->pdev->dev, "Command is NULL:"
72 " already returned to OS (srb=%p)\n", srb);
73 return;
74 }
75
76 ddb_entry = srb->ddb;
77 if (ddb_entry == NULL) {
78 cmd->result = DID_NO_CONNECT << 16;
79 goto status_entry_exit;
80 }
81
82 residual = le32_to_cpu(sts_entry->residualByteCnt);
83
84 /* Translate ISP error to a Linux SCSI error. */
85 scsi_status = sts_entry->scsiStatus;
86 switch (sts_entry->completionStatus) {
87 case SCS_COMPLETE:
88 if (scsi_status == 0) {
89 cmd->result = DID_OK << 16;
90 break;
91 }
92
93 if (sts_entry->iscsiFlags &
94 (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
95 cmd->resid = residual;
96
97 cmd->result = DID_OK << 16 | scsi_status;
98
99 if (scsi_status != SCSI_CHECK_CONDITION)
100 break;
101
102 /* Copy Sense Data into sense buffer. */
103 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
104
105 sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
106 if (sensebytecnt == 0)
107 break;
108
109 memcpy(cmd->sense_buffer, sts_entry->senseData,
110 min(sensebytecnt,
111 (uint16_t) sizeof(cmd->sense_buffer)));
112
113 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
114 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
115 cmd->device->channel, cmd->device->id,
116 cmd->device->lun, __func__,
117 sts_entry->senseData[2] & 0x0f,
118 sts_entry->senseData[12],
119 sts_entry->senseData[13]));
120
121 srb->flags |= SRB_GOT_SENSE;
122 break;
123
124 case SCS_INCOMPLETE:
125 /* Always set the status to DID_ERROR, since
126 * all conditions result in that status anyway */
127 cmd->result = DID_ERROR << 16;
128 break;
129
130 case SCS_RESET_OCCURRED:
131 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
132 ha->host_no, cmd->device->channel,
133 cmd->device->id, cmd->device->lun, __func__));
134
135 cmd->result = DID_RESET << 16;
136 break;
137
138 case SCS_ABORTED:
139 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
140 ha->host_no, cmd->device->channel,
141 cmd->device->id, cmd->device->lun, __func__));
142
143 cmd->result = DID_RESET << 16;
144 break;
145
146 case SCS_TIMEOUT:
147 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
148 ha->host_no, cmd->device->channel,
149 cmd->device->id, cmd->device->lun));
150
151 cmd->result = DID_BUS_BUSY << 16;
152
153 /*
154 * Mark device missing so that we won't continue to send
155 * I/O to this device. We should get a ddb state change
156 * AEN soon.
157 */
158 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
159 qla4xxx_mark_device_missing(ha, ddb_entry);
160 break;
161
162 case SCS_DATA_UNDERRUN:
163 case SCS_DATA_OVERRUN:
164 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
165			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
166 "residual = 0x%x\n", ha->host_no,
167 cmd->device->channel, cmd->device->id,
168 cmd->device->lun, __func__, residual));
169
170 cmd->result = DID_ERROR << 16;
171 break;
172 }
173
174 if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
175 /*
176 * Firmware detected a SCSI transport underrun
177 * condition
178 */
179 cmd->resid = residual;
180 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
181 "detected, xferlen = 0x%x, residual = "
182 "0x%x\n",
183 ha->host_no, cmd->device->channel,
184 cmd->device->id,
185 cmd->device->lun, __func__,
186 cmd->request_bufflen,
187 residual));
188 }
189
190 /*
191		 * If there is scsi_status, it takes precedence over
192 * underflow condition.
193 */
194 if (scsi_status != 0) {
195 cmd->result = DID_OK << 16 | scsi_status;
196
197 if (scsi_status != SCSI_CHECK_CONDITION)
198 break;
199
200 /* Copy Sense Data into sense buffer. */
201 memset(cmd->sense_buffer, 0,
202 sizeof(cmd->sense_buffer));
203
204 sensebytecnt =
205 le16_to_cpu(sts_entry->senseDataByteCnt);
206 if (sensebytecnt == 0)
207 break;
208
209 memcpy(cmd->sense_buffer, sts_entry->senseData,
210 min(sensebytecnt,
211 (uint16_t) sizeof(cmd->sense_buffer)));
212
213 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
214 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
215 cmd->device->channel, cmd->device->id,
216 cmd->device->lun, __func__,
217 sts_entry->senseData[2] & 0x0f,
218 sts_entry->senseData[12],
219 sts_entry->senseData[13]));
220 } else {
221 /*
222 * If RISC reports underrun and target does not
223 * report it then we must have a lost frame, so
224 * tell upper layer to retry it by reporting a
225 * bus busy.
226 */
227 if ((sts_entry->iscsiFlags &
228 ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
229 cmd->result = DID_BUS_BUSY << 16;
230 } else if ((cmd->request_bufflen - residual) <
231 cmd->underflow) {
232 /*
233 * Handle mid-layer underflow???
234 *
235 * For kernels less than 2.4, the driver must
236 * return an error if an underflow is detected.
237 * For kernels equal-to and above 2.4, the
238				 * mid-layer will apparently handle the
239 * underflow by detecting the residual count --
240 * unfortunately, we do not see where this is
241 * actually being done. In the interim, we
242 * will return DID_ERROR.
243 */
244 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
245 "Mid-layer Data underrun, "
246 "xferlen = 0x%x, "
247 "residual = 0x%x\n", ha->host_no,
248 cmd->device->channel,
249 cmd->device->id,
250 cmd->device->lun, __func__,
251 cmd->request_bufflen, residual));
252
253 cmd->result = DID_ERROR << 16;
254 } else {
255 cmd->result = DID_OK << 16;
256 }
257 }
258 break;
259
260 case SCS_DEVICE_LOGGED_OUT:
261 case SCS_DEVICE_UNAVAILABLE:
262 /*
263 * Mark device missing so that we won't continue to
264 * send I/O to this device. We should get a ddb
265 * state change AEN soon.
266 */
267 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
268 qla4xxx_mark_device_missing(ha, ddb_entry);
269
270 cmd->result = DID_BUS_BUSY << 16;
271 break;
272
273 case SCS_QUEUE_FULL:
274 /*
275 * SCSI Mid-Layer handles device queue full
276 */
277 cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
278 DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
279 "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
280 " iResp=%02x\n", ha->host_no, cmd->device->id,
281 cmd->device->lun, __func__,
282 sts_entry->completionStatus,
283 sts_entry->scsiStatus, sts_entry->state_flags,
284 sts_entry->iscsiFlags,
285 sts_entry->iscsiResponse));
286 break;
287
288 default:
289 cmd->result = DID_ERROR << 16;
290 break;
291 }
292
293status_entry_exit:
294
295 /* complete the request */
296 srb->cc_stat = sts_entry->completionStatus;
297 qla4xxx_srb_compl(ha, srb);
298}
299
300/**
301 * qla4xxx_process_response_queue - process response queue completions
302 * @ha: Pointer to host adapter structure.
303 *
304 * This routine process response queue completions in interrupt context.
305 * Hardware_lock locked upon entry
306 **/
307static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
308{
309 uint32_t count = 0;
310 struct srb *srb = NULL;
311 struct status_entry *sts_entry;
312
313 /* Process all responses from response queue */
314 while ((ha->response_in =
315 (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
316 ha->response_out) {
317 sts_entry = (struct status_entry *) ha->response_ptr;
318 count++;
319
320 /* Advance pointers for next entry */
321 if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
322 ha->response_out = 0;
323 ha->response_ptr = ha->response_ring;
324 } else {
325 ha->response_out++;
326 ha->response_ptr++;
327 }
328
329 /* process entry */
330 switch (sts_entry->hdr.entryType) {
331 case ET_STATUS:
332 /*
333 * Common status - Single completion posted in single
334 * IOSB.
335 */
336 qla4xxx_status_entry(ha, sts_entry);
337 break;
338
339 case ET_PASSTHRU_STATUS:
340 break;
341
342 case ET_STATUS_CONTINUATION:
343 /* Just throw away the status continuation entries */
344 DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
345 "- ignoring\n", ha->host_no, __func__));
346 break;
347
348 case ET_COMMAND:
349 /* ISP device queue is full. Command not
350 * accepted by ISP. Queue command for
351 * later */
352
353 srb = qla4xxx_del_from_active_array(ha,
354 le32_to_cpu(sts_entry->
355 handle));
356 if (srb == NULL)
357 goto exit_prq_invalid_handle;
358
359 DEBUG2(printk("scsi%ld: %s: FW device queue full, "
360 "srb %p\n", ha->host_no, __func__, srb));
361
362			/* Retry normally by sending it back with
363 * DID_BUS_BUSY */
364 srb->cmd->result = DID_BUS_BUSY << 16;
365 qla4xxx_srb_compl(ha, srb);
366 break;
367
368 case ET_CONTINUE:
369 /* Just throw away the continuation entries */
370 DEBUG2(printk("scsi%ld: %s: Continuation entry - "
371 "ignoring\n", ha->host_no, __func__));
372 break;
373
374 default:
375 /*
376 * Invalid entry in response queue, reset RISC
377 * firmware.
378 */
379 DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
380 "response queue \n", ha->host_no,
381 __func__,
382 sts_entry->hdr.entryType));
383 goto exit_prq_error;
384 }
385 }
386
387 /*
388	 * Done with responses, update the ISP.  For QLA4010, this also clears
389 * the interrupt.
390 */
391 writel(ha->response_out, &ha->reg->rsp_q_out);
392 readl(&ha->reg->rsp_q_out);
393
394 return;
395
396exit_prq_invalid_handle:
397 DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
398 ha->host_no, __func__, srb, sts_entry->hdr.entryType,
399 sts_entry->completionStatus));
400
401exit_prq_error:
402 writel(ha->response_out, &ha->reg->rsp_q_out);
403 readl(&ha->reg->rsp_q_out);
404
405 set_bit(DPC_RESET_HA, &ha->dpc_flags);
406}
407
408/**
409 * qla4xxx_isr_decode_mailbox - decodes mailbox status
410 * @ha: Pointer to host adapter structure.
411 * @mbox_status: Mailbox status.
412 *
413 * This routine decodes the mailbox status during the ISR.
414 * Hardware_lock locked upon entry. runs in interrupt context.
415 **/
416static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
417 uint32_t mbox_status)
418{
419 int i;
420
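	/* Completion statuses are in the 0x4xxx range (busy and intermediate
	 * completion codes are checked explicitly); asynchronous event
	 * statuses are in the 0x8xxx range. */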
421 if ((mbox_status == MBOX_STS_BUSY) ||
422 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
423 (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
424 ha->mbox_status[0] = mbox_status;
425
426 if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
427 /*
428 * Copy all mailbox registers to a temporary
429 * location and set mailbox command done flag
430 */
431 for (i = 1; i < ha->mbox_status_count; i++)
432 ha->mbox_status[i] =
433 readl(&ha->reg->mailbox[i]);
434
435 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
436 wake_up(&ha->mailbox_wait_queue);
437 }
438 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
439 /* Immediately process the AENs that don't require much work.
440 * Only queue the database_changed AENs */
441 switch (mbox_status) {
442 case MBOX_ASTS_SYSTEM_ERROR:
443 /* Log Mailbox registers */
444 if (ql4xdontresethba) {
445				DEBUG2(printk("%s: Don't Reset HBA\n",
446 __func__));
447 } else {
448 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
449 set_bit(DPC_RESET_HA, &ha->dpc_flags);
450 }
451 break;
452
453 case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
454 case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
455 case MBOX_ASTS_NVRAM_INVALID:
456 case MBOX_ASTS_IP_ADDRESS_CHANGED:
457 case MBOX_ASTS_DHCP_LEASE_EXPIRED:
458 DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
459 "Reset HA\n", ha->host_no, mbox_status));
460 set_bit(DPC_RESET_HA, &ha->dpc_flags);
461 break;
462
463 case MBOX_ASTS_LINK_UP:
464 DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
465 ha->host_no, mbox_status));
466 set_bit(AF_LINK_UP, &ha->flags);
467 break;
468
469 case MBOX_ASTS_LINK_DOWN:
470 DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
471 ha->host_no, mbox_status));
472 clear_bit(AF_LINK_UP, &ha->flags);
473 break;
474
475 case MBOX_ASTS_HEARTBEAT:
476 ha->seconds_since_last_heartbeat = 0;
477 break;
478
479 case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
480 DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
481 "ACQUIRED\n", ha->host_no, mbox_status));
482 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
483 break;
484
485 case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
486 case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
487 * mode
488 * only */
489 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
490 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
491 case MBOX_ASTS_SUBNET_STATE_CHANGE:
492 /* No action */
493 DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
494 mbox_status));
495 break;
496
497 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
498 case MBOX_ASTS_DNS:
499 /* No action */
500 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
501 "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
502 ha->host_no, mbox_status,
503 readl(&ha->reg->mailbox[1]),
504 readl(&ha->reg->mailbox[2])));
505 break;
506
507 case MBOX_ASTS_SELF_TEST_FAILED:
508 case MBOX_ASTS_LOGIN_FAILED:
509 /* No action */
510 DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
511 "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
512 ha->host_no, mbox_status,
513 readl(&ha->reg->mailbox[1]),
514 readl(&ha->reg->mailbox[2]),
515 readl(&ha->reg->mailbox[3])));
516 break;
517
518 case MBOX_ASTS_DATABASE_CHANGED:
519 /* Queue AEN information and process it in the DPC
520 * routine */
521 if (ha->aen_q_count > 0) {
522 /* advance pointer */
523 if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
524 ha->aen_in = 0;
525 else
526 ha->aen_in++;
527
528 /* decrement available counter */
529 ha->aen_q_count--;
530
531 for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
532 ha->aen_q[ha->aen_in].mbox_sts[i] =
533 readl(&ha->reg->mailbox[i]);
534
535 ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
536
537 /* print debug message */
538 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
539 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
540 ha->host_no, ha->aen_in,
541 mbox_status,
542 ha->aen_q[ha->aen_in].mbox_sts[1],
543 ha->aen_q[ha->aen_in].mbox_sts[2],
544 ha->aen_q[ha->aen_in].mbox_sts[3],
545				      ha->aen_q[ha->aen_in].mbox_sts[4]));
546
547 /* The DPC routine will process the aen */
548 set_bit(DPC_AEN, &ha->dpc_flags);
549 } else {
550 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
551 "overflowed! AEN LOST!!\n",
552 ha->host_no, __func__,
553 mbox_status));
554
555 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
556 ha->host_no));
557
558 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
559 DEBUG2(printk("AEN[%d] %04x %04x %04x "
560 "%04x\n", i,
561 ha->aen_q[i].mbox_sts[0],
562 ha->aen_q[i].mbox_sts[1],
563 ha->aen_q[i].mbox_sts[2],
564 ha->aen_q[i].mbox_sts[3]));
565 }
566 }
567 break;
568
569 default:
570 DEBUG2(printk(KERN_WARNING
571 "scsi%ld: AEN %04x UNKNOWN\n",
572 ha->host_no, mbox_status));
573 break;
574 }
575 } else {
576 DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
577 ha->host_no, mbox_status));
578
579 ha->mbox_status[0] = mbox_status;
580 }
581}
582
583/**
584 * qla4xxx_interrupt_service_routine - isr
585 * @ha: pointer to host adapter structure.
586 *
587 * This is the main interrupt service routine.
588 * hardware_lock locked upon entry. runs in interrupt context.
589 **/
590void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
591 uint32_t intr_status)
592{
593 /* Process response queue interrupt. */
594 if (intr_status & CSR_SCSI_COMPLETION_INTR)
595 qla4xxx_process_response_queue(ha);
596
597 /* Process mailbox/asynch event interrupt.*/
598 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
599 qla4xxx_isr_decode_mailbox(ha,
600 readl(&ha->reg->mailbox[0]));
601
602 /* Clear Mailbox Interrupt */
603 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
604 &ha->reg->ctrl_status);
605 readl(&ha->reg->ctrl_status);
606 }
607}
608
609/**
610 * qla4xxx_intr_handler - hardware interrupt handler.
611 * @irq: Unused
612 * @dev_id: Pointer to host adapter structure
613 * @regs: Unused
614 **/
615irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
616{
617 struct scsi_qla_host *ha;
618 uint32_t intr_status;
619 unsigned long flags = 0;
620 uint8_t reqs_count = 0;
621
622 ha = (struct scsi_qla_host *) dev_id;
623 if (!ha) {
624 DEBUG2(printk(KERN_INFO
625 "qla4xxx: Interrupt with NULL host ptr\n"));
626 return IRQ_NONE;
627 }
628
629 spin_lock_irqsave(&ha->hardware_lock, flags);
630
631 /*
632 * Repeatedly service interrupts up to a maximum of
633 * MAX_REQS_SERVICED_PER_INTR
634 */
635 while (1) {
636 /*
637 * Read interrupt status
638 */
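		/* If the firmware has posted new responses (shadow rsp_q_in
		 * has advanced past response_out), treat this pass as a
		 * completion interrupt without reading the control/status
		 * register. */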
639 if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
640 ha->response_out)
641 intr_status = CSR_SCSI_COMPLETION_INTR;
642 else
643 intr_status = readl(&ha->reg->ctrl_status);
644
645 if ((intr_status &
646 (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
647 0) {
648 if (reqs_count == 0)
649 ha->spurious_int_count++;
650 break;
651 }
652
653 if (intr_status & CSR_FATAL_ERROR) {
654 DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
655 "Status 0x%04x\n", ha->host_no,
656 readl(isp_port_error_status (ha))));
657
658 /* Issue Soft Reset to clear this error condition.
659 * This will prevent the RISC from repeatedly
660 * interrupting the driver; thus, allowing the DPC to
661 * get scheduled to continue error recovery.
662 * NOTE: Disabling RISC interrupts does not work in
663 * this case, as CSR_FATAL_ERROR overrides
664 * CSR_SCSI_INTR_ENABLE */
665 if ((readl(&ha->reg->ctrl_status) &
666 CSR_SCSI_RESET_INTR) == 0) {
667 writel(set_rmask(CSR_SOFT_RESET),
668 &ha->reg->ctrl_status);
669 readl(&ha->reg->ctrl_status);
670 }
671
672 writel(set_rmask(CSR_FATAL_ERROR),
673 &ha->reg->ctrl_status);
674 readl(&ha->reg->ctrl_status);
675
676 __qla4xxx_disable_intrs(ha);
677
678 set_bit(DPC_RESET_HA, &ha->dpc_flags);
679
680 break;
681 } else if (intr_status & CSR_SCSI_RESET_INTR) {
682 clear_bit(AF_ONLINE, &ha->flags);
683 __qla4xxx_disable_intrs(ha);
684
685 writel(set_rmask(CSR_SCSI_RESET_INTR),
686 &ha->reg->ctrl_status);
687 readl(&ha->reg->ctrl_status);
688
689 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
690
691 break;
692 } else if (intr_status & INTR_PENDING) {
693 qla4xxx_interrupt_service_routine(ha, intr_status);
694 ha->total_io_count++;
695 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
696 break;
697
698 intr_status = 0;
699 }
700 }
701
702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
703
704 return IRQ_HANDLED;
705}
706
707/**
708 * qla4xxx_process_aen - processes AENs generated by firmware
709 * @ha: pointer to host adapter structure.
710 * @process_aen: type of AENs to process
711 *
712 * Processes specific types of Asynchronous Events generated by firmware.
713 * The type of AENs to process is specified by process_aen and can be
714 * PROCESS_ALL_AENS 0
715 * FLUSH_DDB_CHANGED_AENS 1
716 * RELOGIN_DDB_CHANGED_AENS 2
717 **/
718void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
719{
720 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
721 struct aen *aen;
722 int i;
723 unsigned long flags;
724
725 spin_lock_irqsave(&ha->hardware_lock, flags);
726 while (ha->aen_out != ha->aen_in) {
727 /* Advance pointers for next entry */
728 if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
729 ha->aen_out = 0;
730 else
731 ha->aen_out++;
732
733 ha->aen_q_count++;
734 aen = &ha->aen_q[ha->aen_out];
735
736 /* copy aen information to local structure */
737 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
738 mbox_sts[i] = aen->mbox_sts[i];
739
740 spin_unlock_irqrestore(&ha->hardware_lock, flags);
741
742 DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
743 "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
744 mbox_sts[0], mbox_sts[2], mbox_sts[3],
745 mbox_sts[1], mbox_sts[4]));
746
747 switch (mbox_sts[0]) {
748 case MBOX_ASTS_DATABASE_CHANGED:
749 if (process_aen == FLUSH_DDB_CHANGED_AENS) {
750 DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
751 "[%d] state=%04x FLUSHED!\n",
752 ha->host_no, ha->aen_out,
753 mbox_sts[0], mbox_sts[2],
754 mbox_sts[3]));
755 break;
756 } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
757 /* for use during init time, we only want to
758 * relogin non-active ddbs */
759 struct ddb_entry *ddb_entry;
760
761 ddb_entry =
762 /* FIXME: name length? */
763 qla4xxx_lookup_ddb_by_fw_index(ha,
764 mbox_sts[2]);
765 if (!ddb_entry)
766 break;
767
768 ddb_entry->dev_scan_wait_to_complete_relogin =
769 0;
770 ddb_entry->dev_scan_wait_to_start_relogin =
771 jiffies +
772 ((ddb_entry->default_time2wait +
773 4) * HZ);
774
775				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
776 " RELOGIN after %d seconds\n",
777 ha->host_no,
778 ddb_entry->fw_ddb_index,
779 ddb_entry->default_time2wait +
780 4));
781 break;
782 }
783
784 if (mbox_sts[1] == 0) { /* Global DB change. */
785 qla4xxx_reinitialize_ddb_list(ha);
786 } else if (mbox_sts[1] == 1) { /* Specific device. */
787 qla4xxx_process_ddb_changed(ha, mbox_sts[2],
788 mbox_sts[3]);
789 }
790 break;
791 }
792 spin_lock_irqsave(&ha->hardware_lock, flags);
793 }
794 spin_unlock_irqrestore(&ha->hardware_lock, flags);
795
796}
797
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
new file mode 100644
index 00000000000..ed977f70b2d
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -0,0 +1,930 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10
11/**
12 * qla4xxx_mailbox_command - issues mailbox commands
13 * @ha: Pointer to host adapter structure.
14 * @inCount: number of mailbox registers to load.
15 * @outCount: number of mailbox registers to return.
16 * @mbx_cmd: data pointer for mailbox in registers.
17 * @mbx_sts: data pointer for mailbox out registers.
18 *
19 * This routine issues mailbox commands and waits for completion.
20 * If outCount is 0, this routine completes successfully WITHOUT waiting
21 * for the mailbox command to complete.
22 **/
23int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
24 uint8_t outCount, uint32_t *mbx_cmd,
25 uint32_t *mbx_sts)
26{
27 int status = QLA_ERROR;
28 uint8_t i;
29 u_long wait_count;
30 uint32_t intr_status;
31 unsigned long flags = 0;
32 DECLARE_WAITQUEUE(wait, current);
33
34 mutex_lock(&ha->mbox_sem);
35
36 /* Mailbox code active */
37 set_bit(AF_MBOX_COMMAND, &ha->flags);
38
39 /* Make sure that pointers are valid */
40 if (!mbx_cmd || !mbx_sts) {
41 DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
42 "pointer\n", ha->host_no, __func__));
43 goto mbox_exit;
44 }
45
46 /* To prevent overwriting mailbox registers for a command that has
47 * not yet been serviced, check to see if a previously issued
48 * mailbox command is interrupting.
49 * -----------------------------------------------------------------
50 */
51 spin_lock_irqsave(&ha->hardware_lock, flags);
52 intr_status = readl(&ha->reg->ctrl_status);
53 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
54 /* Service existing interrupt */
55 qla4xxx_interrupt_service_routine(ha, intr_status);
56 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
57 }
58
59 /* Send the mailbox command to the firmware */
60 ha->mbox_status_count = outCount;
61 for (i = 0; i < outCount; i++)
62 ha->mbox_status[i] = 0;
63
64 /* Load all mailbox registers, except mailbox 0. */
65 for (i = 1; i < inCount; i++)
66 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
67
68 /* Wakeup firmware */
69 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
70 readl(&ha->reg->mailbox[0]);
71 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
72 readl(&ha->reg->ctrl_status);
73 spin_unlock_irqrestore(&ha->hardware_lock, flags);
74
75 /* Wait for completion */
76 set_current_state(TASK_UNINTERRUPTIBLE);
77 add_wait_queue(&ha->mailbox_wait_queue, &wait);
78
79 /*
80 * If we don't want status, don't wait for the mailbox command to
81 * complete. For example, MBOX_CMD_RESET_FW doesn't return status,
82 * you must poll the inbound Interrupt Mask for completion.
83 */
84 if (outCount == 0) {
85 status = QLA_SUCCESS;
86 set_current_state(TASK_RUNNING);
87 remove_wait_queue(&ha->mailbox_wait_queue, &wait);
88 goto mbox_exit;
89 }
90 /* Wait for command to complete */
91 wait_count = jiffies + MBOX_TOV * HZ;
92 while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
93 if (time_after_eq(jiffies, wait_count))
94 break;
95
96 spin_lock_irqsave(&ha->hardware_lock, flags);
97 intr_status = readl(&ha->reg->ctrl_status);
98 if (intr_status & INTR_PENDING) {
99 /*
100 * Service the interrupt.
101 * The ISR will save the mailbox status registers
102 * to a temporary storage location in the adapter
103 * structure.
104 */
105 ha->mbox_status_count = outCount;
106 qla4xxx_interrupt_service_routine(ha, intr_status);
107 }
108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109 msleep(10);
110 }
111 set_current_state(TASK_RUNNING);
112 remove_wait_queue(&ha->mailbox_wait_queue, &wait);
113
114 /* Check for mailbox timeout. */
115 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
116 DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
117 " Scheduling Adapter Reset\n", ha->host_no,
118 mbx_cmd[0]));
119 ha->mailbox_timeout_count++;
120 mbx_sts[0] = (-1);
121 set_bit(DPC_RESET_HA, &ha->dpc_flags);
122 goto mbox_exit;
123 }
124
125 /*
126 * Copy the mailbox out registers to the caller's mailbox in/out
127 * structure.
128 */
129 spin_lock_irqsave(&ha->hardware_lock, flags);
130 for (i = 0; i < outCount; i++)
131 mbx_sts[i] = ha->mbox_status[i];
132
133 /* Set return status and error flags (if applicable). */
134 switch (ha->mbox_status[0]) {
135 case MBOX_STS_COMMAND_COMPLETE:
136 status = QLA_SUCCESS;
137 break;
138
139 case MBOX_STS_INTERMEDIATE_COMPLETION:
140 status = QLA_SUCCESS;
141 break;
142
143 case MBOX_STS_BUSY:
144 DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
145 ha->host_no, __func__, mbx_cmd[0]));
146 ha->mailbox_timeout_count++;
147 break;
148
149 default:
150 DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
151 "sts = %08X ****\n", ha->host_no, __func__,
152 mbx_cmd[0], mbx_sts[0]));
153 break;
154 }
155 spin_unlock_irqrestore(&ha->hardware_lock, flags);
156
157mbox_exit:
158 clear_bit(AF_MBOX_COMMAND, &ha->flags);
159 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
160 mutex_unlock(&ha->mbox_sem);
161
162 return status;
163}
164
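/*
 * Typical caller pattern, sketched here for reference (it mirrors
 * qla4xxx_get_firmware_state further down in this file): zero both
 * register arrays, load the command code and any inputs into mbox_cmd[],
 * and ask for only as many status registers as are needed.
 *
 *	uint32_t mbox_cmd[MBOX_REG_COUNT];
 *	uint32_t mbox_sts[MBOX_REG_COUNT];
 *
 *	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 *	memset(&mbox_sts, 0, sizeof(mbox_sts));
 *	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
 *	if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) ==
 *	    QLA_SUCCESS)
 *		ha->firmware_state = mbox_sts[1];
 */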
165
166/**
167 * qla4xxx_issue_iocb - issue mailbox iocb command
168 * @ha: adapter state pointer.
169 * @buffer: buffer pointer.
170 * @phys_addr: physical address of buffer.
171 * @size: size of buffer.
172 *
173 * Issues iocbs via mailbox commands.
174 * TARGET_QUEUE_LOCK must be released.
175 * ADAPTER_STATE_LOCK must be released.
176 **/
177int
178qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
179 dma_addr_t phys_addr, size_t size)
180{
181 uint32_t mbox_cmd[MBOX_REG_COUNT];
182 uint32_t mbox_sts[MBOX_REG_COUNT];
183 int status;
184
185 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
186 memset(&mbox_sts, 0, sizeof(mbox_sts));
187 mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
188 mbox_cmd[1] = 0;
189 mbox_cmd[2] = LSDW(phys_addr);
190 mbox_cmd[3] = MSDW(phys_addr);
191 status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
192 return status;
193}
194
195int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
196 uint16_t fw_ddb_index,
197 uint16_t connection_id,
198 uint16_t option)
199{
200 uint32_t mbox_cmd[MBOX_REG_COUNT];
201 uint32_t mbox_sts[MBOX_REG_COUNT];
202
203 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
204 memset(&mbox_sts, 0, sizeof(mbox_sts));
205 mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
206 mbox_cmd[1] = fw_ddb_index;
207 mbox_cmd[2] = connection_id;
208 mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
209 if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
210 QLA_SUCCESS) {
211 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
212 "option %04x failed sts %04X %04X",
213 ha->host_no, __func__,
214 option, mbox_sts[0], mbox_sts[1]));
215 if (mbox_sts[0] == 0x4005)
216 DEBUG2(printk("%s reason %04X\n", __func__,
217 mbox_sts[1]));
218 }
219 return QLA_SUCCESS;
220}
221
222int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
223 uint16_t fw_ddb_index)
224{
225 uint32_t mbox_cmd[MBOX_REG_COUNT];
226 uint32_t mbox_sts[MBOX_REG_COUNT];
227
228 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
229 memset(&mbox_sts, 0, sizeof(mbox_sts));
230 mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
231 mbox_cmd[1] = fw_ddb_index;
232 if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
233 QLA_SUCCESS)
234 return QLA_ERROR;
235
236 return QLA_SUCCESS;
237}
238
239/**
240 * qla4xxx_initialize_fw_cb - initializes firmware control block.
241 * @ha: Pointer to host adapter structure.
242 **/
243int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
244{
245 struct init_fw_ctrl_blk *init_fw_cb;
246 dma_addr_t init_fw_cb_dma;
247 uint32_t mbox_cmd[MBOX_REG_COUNT];
248 uint32_t mbox_sts[MBOX_REG_COUNT];
249 int status = QLA_ERROR;
250
251 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
252 sizeof(struct init_fw_ctrl_blk),
253 &init_fw_cb_dma, GFP_KERNEL);
254 if (init_fw_cb == NULL) {
255 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
256 ha->host_no, __func__));
257 return 10;
258 }
259 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
260
261 /* Get Initialize Firmware Control Block. */
262 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
263 memset(&mbox_sts, 0, sizeof(mbox_sts));
264 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
265 mbox_cmd[2] = LSDW(init_fw_cb_dma);
266 mbox_cmd[3] = MSDW(init_fw_cb_dma);
267 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
268 QLA_SUCCESS) {
269 dma_free_coherent(&ha->pdev->dev,
270 sizeof(struct init_fw_ctrl_blk),
271 init_fw_cb, init_fw_cb_dma);
272 return status;
273 }
274
275 /* Initialize request and response queues. */
276 qla4xxx_init_rings(ha);
277
278 /* Fill in the request and response queue information. */
279 init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out);
280 init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in);
281 init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
282 init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
283 init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma));
284 init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma));
285 init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma));
286 init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma));
287 init_fw_cb->ShadowRegBufAddrLo =
288 cpu_to_le32(LSDW(ha->shadow_regs_dma));
289 init_fw_cb->ShadowRegBufAddrHi =
290 cpu_to_le32(MSDW(ha->shadow_regs_dma));
291
292 /* Set up required options. */
293 init_fw_cb->FwOptions |=
294 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
295 FWOPT_INITIATOR_MODE);
296 init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
297
298 /* Save some info in adapter structure. */
299 ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions);
300 ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions);
301 ha->heartbeat_interval = init_fw_cb->HeartbeatInterval;
302 memcpy(ha->ip_address, init_fw_cb->IPAddr,
303 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
304 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
305 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
306 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
307 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
308 memcpy(ha->name_string, init_fw_cb->iSCSINameString,
309 min(sizeof(ha->name_string),
310 sizeof(init_fw_cb->iSCSINameString)));
311 memcpy(ha->alias, init_fw_cb->Alias,
312 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));
313
314	/* Save Command Line Parameter info */
315 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout);
316 ha->discovery_wait = ql4xdiscoverywait;
317
318 /* Send Initialize Firmware Control Block. */
319 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
320 mbox_cmd[1] = 0;
321 mbox_cmd[2] = LSDW(init_fw_cb_dma);
322 mbox_cmd[3] = MSDW(init_fw_cb_dma);
323 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) ==
324 QLA_SUCCESS)
325 status = QLA_SUCCESS;
326 else {
327 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
328 "failed w/ status %04X\n", ha->host_no, __func__,
329 mbox_sts[0]));
330 }
331 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
332 init_fw_cb, init_fw_cb_dma);
333
334 return status;
335}
336
337/**
338 * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
339 * @ha: Pointer to host adapter structure.
340 **/
341int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
342{
343 struct init_fw_ctrl_blk *init_fw_cb;
344 dma_addr_t init_fw_cb_dma;
345 uint32_t mbox_cmd[MBOX_REG_COUNT];
346 uint32_t mbox_sts[MBOX_REG_COUNT];
347
348 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
349 sizeof(struct init_fw_ctrl_blk),
350 &init_fw_cb_dma, GFP_KERNEL);
351 if (init_fw_cb == NULL) {
352 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
353 __func__);
354 return 10;
355 }
356
357 /* Get Initialize Firmware Control Block. */
358 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
359 memset(&mbox_sts, 0, sizeof(mbox_sts));
360 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
361 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
362 mbox_cmd[2] = LSDW(init_fw_cb_dma);
363 mbox_cmd[3] = MSDW(init_fw_cb_dma);
364
365 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
366 QLA_SUCCESS) {
367 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
368 ha->host_no, __func__));
369 dma_free_coherent(&ha->pdev->dev,
370 sizeof(struct init_fw_ctrl_blk),
371 init_fw_cb, init_fw_cb_dma);
372 return QLA_ERROR;
373 }
374
375 /* Save IP Address. */
376 memcpy(ha->ip_address, init_fw_cb->IPAddr,
377 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
378 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
379 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
380 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
381 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
382
383 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
384 init_fw_cb, init_fw_cb_dma);
385
386 return QLA_SUCCESS;
387}
388
389/**
390 * qla4xxx_get_firmware_state - gets firmware state of HBA
391 * @ha: Pointer to host adapter structure.
392 **/
393int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
394{
395 uint32_t mbox_cmd[MBOX_REG_COUNT];
396 uint32_t mbox_sts[MBOX_REG_COUNT];
397
398	/* Get firmware state */
399 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
400 memset(&mbox_sts, 0, sizeof(mbox_sts));
401 mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
402 if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) !=
403 QLA_SUCCESS) {
404 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
405 "status %04X\n", ha->host_no, __func__,
406 mbox_sts[0]));
407 return QLA_ERROR;
408 }
409 ha->firmware_state = mbox_sts[1];
410 ha->board_id = mbox_sts[2];
411 ha->addl_fw_state = mbox_sts[3];
412 DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
413 ha->host_no, __func__, ha->firmware_state);)
414
415 return QLA_SUCCESS;
416}
417
418/**
419 * qla4xxx_get_firmware_status - retrieves firmware status
420 * @ha: Pointer to host adapter structure.
421 **/
422int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
423{
424 uint32_t mbox_cmd[MBOX_REG_COUNT];
425 uint32_t mbox_sts[MBOX_REG_COUNT];
426
427	/* Get firmware status */
428 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
429 memset(&mbox_sts, 0, sizeof(mbox_sts));
430 mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
431 if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) !=
432 QLA_SUCCESS) {
433 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
434 "status %04X\n", ha->host_no, __func__,
435 mbox_sts[0]));
436 return QLA_ERROR;
437 }
438
439 /* High-water mark of IOCBs */
440 ha->iocb_hiwat = mbox_sts[2];
441 if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
442 ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
443 else
444 dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d "
445			 "firmware IOCBs available (%d).\n",
446 IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
447
448 return QLA_SUCCESS;
449}
450
451/**
452 * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
453 * @ha: Pointer to host adapter structure.
454 * @fw_ddb_index: Firmware's device database index
455 * @fw_ddb_entry: Pointer to firmware's device database entry structure
456 * @num_valid_ddb_entries: Pointer to number of valid ddb entries
457 * @next_ddb_index: Pointer to next valid device database index
458 * @fw_ddb_device_state: Pointer to device state
459 **/
460int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
461 uint16_t fw_ddb_index,
462 struct dev_db_entry *fw_ddb_entry,
463 dma_addr_t fw_ddb_entry_dma,
464 uint32_t *num_valid_ddb_entries,
465 uint32_t *next_ddb_index,
466 uint32_t *fw_ddb_device_state,
467 uint32_t *conn_err_detail,
468 uint16_t *tcp_source_port_num,
469 uint16_t *connection_id)
470{
471 int status = QLA_ERROR;
472 uint32_t mbox_cmd[MBOX_REG_COUNT];
473 uint32_t mbox_sts[MBOX_REG_COUNT];
474
475 /* Make sure the device index is valid */
476 if (fw_ddb_index >= MAX_DDB_ENTRIES) {
477 DEBUG2(printk("scsi%ld: %s: index [%d] out of range.\n",
478 ha->host_no, __func__, fw_ddb_index));
479 goto exit_get_fwddb;
480 }
481 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
482 memset(&mbox_sts, 0, sizeof(mbox_sts));
483 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
484 mbox_cmd[1] = (uint32_t) fw_ddb_index;
485 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
486 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
487 if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) ==
488 QLA_ERROR) {
489 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
490 " with status 0x%04X\n", ha->host_no, __func__,
491 mbox_sts[0]));
492 goto exit_get_fwddb;
493 }
494 if (fw_ddb_index != mbox_sts[1]) {
495 DEBUG2(printk("scsi%ld: %s: index mismatch [%d] != [%d].\n",
496 ha->host_no, __func__, fw_ddb_index,
497 mbox_sts[1]));
498 goto exit_get_fwddb;
499 }
500 if (fw_ddb_entry) {
501 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
502 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
503 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
504 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0],
505 fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2],
506 fw_ddb_entry->ipAddr[3],
507 le16_to_cpu(fw_ddb_entry->portNumber),
508 fw_ddb_entry->iscsiName);
509 }
510 if (num_valid_ddb_entries)
511 *num_valid_ddb_entries = mbox_sts[2];
512 if (next_ddb_index)
513 *next_ddb_index = mbox_sts[3];
514 if (fw_ddb_device_state)
515 *fw_ddb_device_state = mbox_sts[4];
516
517 /*
518	 * RA: This mailbox has been changed to also pass connection error
519	 * details.  This is true for ISP4010 as of firmware version E; it is
520	 * not clear exactly when it changed.  Get the time2wait from the
521	 * fw_ddb_entry field default_time2wait (referred to as minTime2Wait)
522	 * in the DEV_DB_ENTRY struct.
523 */
524 if (conn_err_detail)
525 *conn_err_detail = mbox_sts[5];
526 if (tcp_source_port_num)
527		*tcp_source_port_num = (uint16_t)(mbox_sts[6] >> 16);
528 if (connection_id)
529 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
530 status = QLA_SUCCESS;
531
532exit_get_fwddb:
533 return status;
534}
535
536/**
537 * qla4xxx_set_ddb_entry - sets a ddb entry.
538 * @ha: Pointer to host adapter structure.
539 * @fw_ddb_index: Firmware's device database index
540 * @fw_ddb_entry_dma: DMA address of the firmware's ddb entry structure.
541 *
542 * This routine initializes or updates the adapter's device database
543 * entry for the specified device. It also triggers a login for the
544 * specified device. Therefore, it may also be used as a secondary
545 * login routine when a NULL pointer is specified for the fw_ddb_entry.
546 **/
547int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
548 dma_addr_t fw_ddb_entry_dma)
549{
550 uint32_t mbox_cmd[MBOX_REG_COUNT];
551 uint32_t mbox_sts[MBOX_REG_COUNT];
552
553 /* Do not wait for completion. The firmware will send us an
554 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
555 */
556 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
557 memset(&mbox_sts, 0, sizeof(mbox_sts));
558
559 mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
560 mbox_cmd[1] = (uint32_t) fw_ddb_index;
561 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
562 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
563 return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
564}
565
566int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
567 uint16_t fw_ddb_index)
568{
569 int status = QLA_ERROR;
570 uint32_t mbox_cmd[MBOX_REG_COUNT];
571 uint32_t mbox_sts[MBOX_REG_COUNT];
572
573 /* Do not wait for completion. The firmware will send us an
574 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
575 */
576 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
577 memset(&mbox_sts, 0, sizeof(mbox_sts));
578 mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
579 mbox_cmd[1] = (uint32_t) fw_ddb_index;
580 mbox_cmd[2] = 0;
581 mbox_cmd[3] = 0;
582 mbox_cmd[4] = 0;
583 status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
584 DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
585 __func__, fw_ddb_index, status, mbox_sts[0],
586 mbox_sts[1]);)
587
588 return status;
589}
590
591/**
592 * qla4xxx_get_crash_record - retrieves crash record.
593 * @ha: Pointer to host adapter structure.
594 *
595 * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
596 **/
597void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
598{
599 uint32_t mbox_cmd[MBOX_REG_COUNT];
600 uint32_t mbox_sts[MBOX_REG_COUNT];
601 struct crash_record *crash_record = NULL;
602 dma_addr_t crash_record_dma = 0;
603 uint32_t crash_record_size = 0;
604 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
605 memset(&mbox_sts, 0, sizeof(mbox_cmd));
606
607 /* Get size of crash record. */
608 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
609 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
610 QLA_SUCCESS) {
611 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
612 ha->host_no, __func__));
613 goto exit_get_crash_record;
614 }
615 crash_record_size = mbox_sts[4];
616 if (crash_record_size == 0) {
617 DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
618 ha->host_no, __func__));
619 goto exit_get_crash_record;
620 }
621
622 /* Alloc Memory for Crash Record. */
623 crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
624 &crash_record_dma, GFP_KERNEL);
625 if (crash_record == NULL)
626 goto exit_get_crash_record;
627
628 /* Get Crash Record. */
629 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
630 mbox_cmd[2] = LSDW(crash_record_dma);
631 mbox_cmd[3] = MSDW(crash_record_dma);
632 mbox_cmd[4] = crash_record_size;
633 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
634 QLA_SUCCESS)
635 goto exit_get_crash_record;
636
637 /* Dump Crash Record. */
638
639exit_get_crash_record:
640 if (crash_record)
641 dma_free_coherent(&ha->pdev->dev, crash_record_size,
642 crash_record, crash_record_dma);
643}
644
645/**
646 * qla4xxx_get_conn_event_log - retrieves connection event log
647 * @ha: Pointer to host adapter structure.
648 **/
649void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
650{
651 uint32_t mbox_cmd[MBOX_REG_COUNT];
652 uint32_t mbox_sts[MBOX_REG_COUNT];
653 struct conn_event_log_entry *event_log = NULL;
654 dma_addr_t event_log_dma = 0;
655 uint32_t event_log_size = 0;
656 uint32_t num_valid_entries;
657 uint32_t oldest_entry = 0;
658 uint32_t max_event_log_entries;
659 uint8_t i;
660
661
662 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
663 memset(&mbox_sts, 0, sizeof(mbox_cmd));
664
665	/* Get size of connection event log. */
666 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
667 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
668 QLA_SUCCESS)
669 goto exit_get_event_log;
670
671 event_log_size = mbox_sts[4];
672 if (event_log_size == 0)
673 goto exit_get_event_log;
674
675	/* Alloc Memory for Event Log. */
676 event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
677 &event_log_dma, GFP_KERNEL);
678 if (event_log == NULL)
679 goto exit_get_event_log;
680
681	/* Get Connection Event Log. */
682 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
683 mbox_cmd[2] = LSDW(event_log_dma);
684 mbox_cmd[3] = MSDW(event_log_dma);
685 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
686 QLA_SUCCESS) {
687 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
688 "log!\n", ha->host_no, __func__));
689 goto exit_get_event_log;
690 }
691
692 /* Dump Event Log. */
693 num_valid_entries = mbox_sts[1];
694
695 max_event_log_entries = event_log_size /
696 sizeof(struct conn_event_log_entry);
697
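	/* The firmware event log is a circular buffer: once it has wrapped,
	 * the oldest entry is at (num_valid_entries % max_event_log_entries),
	 * so the dump below starts there and walks around the ring. */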
698 if (num_valid_entries > max_event_log_entries)
699 oldest_entry = num_valid_entries % max_event_log_entries;
700
701 DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
702 ha->host_no, num_valid_entries));
703
704 if (extended_error_logging == 3) {
705 if (oldest_entry == 0) {
706 /* Circular Buffer has not wrapped around */
707 for (i=0; i < num_valid_entries; i++) {
708 qla4xxx_dump_buffer((uint8_t *)event_log+
709 (i*sizeof(*event_log)),
710 sizeof(*event_log));
711 }
712 }
713 else {
714 /* Circular Buffer has wrapped around -
715			 * display accordingly */
716 for (i=oldest_entry; i < max_event_log_entries; i++) {
717 qla4xxx_dump_buffer((uint8_t *)event_log+
718 (i*sizeof(*event_log)),
719 sizeof(*event_log));
720 }
721 for (i=0; i < oldest_entry; i++) {
722 qla4xxx_dump_buffer((uint8_t *)event_log+
723 (i*sizeof(*event_log)),
724 sizeof(*event_log));
725 }
726 }
727 }
728
729exit_get_event_log:
730 if (event_log)
731 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
732 event_log_dma);
733}
734
735/**
736 * qla4xxx_reset_lun - issues LUN Reset
737 * @ha: Pointer to host adapter structure.
738 * @ddb_entry: Pointer to device database entry
739 * @lun: lun number to reset
740 *
741 * This routine performs a LUN RESET on the specified target/lun.
742 * The caller must ensure that the ddb_entry pointer is valid
743 * before calling this routine.
744 **/
745int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
746 int lun)
747{
748 uint32_t mbox_cmd[MBOX_REG_COUNT];
749 uint32_t mbox_sts[MBOX_REG_COUNT];
750 int status = QLA_SUCCESS;
751
752 DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
753 ddb_entry->os_target_id, lun));
754
755 /*
756 * Send lun reset command to ISP, so that the ISP will return all
757 * outstanding requests with RESET status
758 */
759 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
760 memset(&mbox_sts, 0, sizeof(mbox_sts));
761 mbox_cmd[0] = MBOX_CMD_LUN_RESET;
762 mbox_cmd[1] = ddb_entry->fw_ddb_index;
763 mbox_cmd[2] = lun << 8;
764 mbox_cmd[5] = 0x01; /* Immediate Command Enable */
765 qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]);
766 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
767 mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
768 status = QLA_ERROR;
769
770 return status;
771}
772
773
774int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
775 uint32_t offset, uint32_t len)
776{
777 uint32_t mbox_cmd[MBOX_REG_COUNT];
778 uint32_t mbox_sts[MBOX_REG_COUNT];
779
780 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
781 memset(&mbox_sts, 0, sizeof(mbox_sts));
782 mbox_cmd[0] = MBOX_CMD_READ_FLASH;
783 mbox_cmd[1] = LSDW(dma_addr);
784 mbox_cmd[2] = MSDW(dma_addr);
785 mbox_cmd[3] = offset;
786 mbox_cmd[4] = len;
787 if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) !=
788 QLA_SUCCESS) {
789 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
790 "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
791 __func__, mbox_sts[0], mbox_sts[1], offset, len));
792 return QLA_ERROR;
793 }
794 return QLA_SUCCESS;
795}
796
797/**
798 * qla4xxx_get_fw_version - gets firmware version
799 * @ha: Pointer to host adapter structure.
800 *
801 * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
802 * hold an address for data. Make sure that we write 0 to those mailboxes,
803 * if unused.
804 **/
805int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
806{
807 uint32_t mbox_cmd[MBOX_REG_COUNT];
808 uint32_t mbox_sts[MBOX_REG_COUNT];
809
810 /* Get firmware version. */
811 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
812 memset(&mbox_sts, 0, sizeof(mbox_sts));
813 mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
814 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
815 QLA_SUCCESS) {
816 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
817 "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
818 return QLA_ERROR;
819 }
820
821 /* Save firmware version information. */
822 ha->firmware_version[0] = mbox_sts[1];
823 ha->firmware_version[1] = mbox_sts[2];
824 ha->patch_number = mbox_sts[3];
825 ha->build_number = mbox_sts[4];
826
827 return QLA_SUCCESS;
828}
829
830int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
831{
832 uint32_t mbox_cmd[MBOX_REG_COUNT];
833 uint32_t mbox_sts[MBOX_REG_COUNT];
834
835 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
836 memset(&mbox_sts, 0, sizeof(mbox_sts));
837
838 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
839 mbox_cmd[2] = LSDW(dma_addr);
840 mbox_cmd[3] = MSDW(dma_addr);
841
842 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
843 QLA_SUCCESS) {
844 DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
845 ha->host_no, __func__, mbox_sts[0]));
846 return QLA_ERROR;
847 }
848 return QLA_SUCCESS;
849}
850
851int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
852{
853 uint32_t mbox_cmd[MBOX_REG_COUNT];
854 uint32_t mbox_sts[MBOX_REG_COUNT];
855
856 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
857 memset(&mbox_sts, 0, sizeof(mbox_sts));
858
859 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
860 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
861
862 if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) !=
863 QLA_SUCCESS) {
864 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
865 *ddb_index = mbox_sts[2];
866 } else {
867 DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
868 ha->host_no, __func__, mbox_sts[0]));
869 return QLA_ERROR;
870 }
871 } else {
872 *ddb_index = MAX_PRST_DEV_DB_ENTRIES;
873 }
874
875 return QLA_SUCCESS;
876}
877
878
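/*
 * qla4xxx_send_tgts - builds a temporary ddb entry for a discovery session
 * to the given IP address and port, and writes it to the firmware, which
 * triggers a login to that portal for target discovery.
 */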
879int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
880{
881 struct dev_db_entry *fw_ddb_entry;
882 dma_addr_t fw_ddb_entry_dma;
883 uint32_t ddb_index;
884 int ret_val = QLA_SUCCESS;
885
886
887 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
888 sizeof(*fw_ddb_entry),
889 &fw_ddb_entry_dma, GFP_KERNEL);
890 if (!fw_ddb_entry) {
891 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
892 ha->host_no, __func__));
893 ret_val = QLA_ERROR;
894 goto qla4xxx_send_tgts_exit;
895 }
896
897 ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma);
898 if (ret_val != QLA_SUCCESS)
899 goto qla4xxx_send_tgts_exit;
900
901 ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index);
902 if (ret_val != QLA_SUCCESS)
903 goto qla4xxx_send_tgts_exit;
904
905 memset((void *)fw_ddb_entry->iSCSIAlias, 0,
906 sizeof(fw_ddb_entry->iSCSIAlias));
907
908 memset((void *)fw_ddb_entry->iscsiName, 0,
909 sizeof(fw_ddb_entry->iscsiName));
910
911 memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr));
912 memset((void *)fw_ddb_entry->targetAddr, 0,
913 sizeof(fw_ddb_entry->targetAddr));
914
915 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
916 fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port));
917
918 fw_ddb_entry->ipAddr[0] = *ip;
919 fw_ddb_entry->ipAddr[1] = *(ip + 1);
920 fw_ddb_entry->ipAddr[2] = *(ip + 2);
921 fw_ddb_entry->ipAddr[3] = *(ip + 3);
922
923 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
924
925qla4xxx_send_tgts_exit:
926 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
927 fw_ddb_entry, fw_ddb_entry_dma);
928 return ret_val;
929}
930
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
new file mode 100644
index 00000000000..e3957ca5b64
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -0,0 +1,224 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9
10static inline int eeprom_size(struct scsi_qla_host *ha)
11{
12 return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
13}
14
15static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
16{
17 return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
18 FM93C56A_NO_ADDR_BITS_16;
19}
20
21static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
22{
23 return FM93C56A_DATA_BITS_16;
24}
25
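/*
 * The NVRAM part (an FM93C56A/66A/86A-style serial EEPROM) is accessed by
 * bit-banging the ISP NVRAM register: assert chip select, shift the command
 * and address out one bit at a time with a rising/falling clock pulse per
 * bit, then clock the 16-bit data word back in on the DI line.
 */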
26static int fm93c56a_select(struct scsi_qla_host * ha)
27{
28 DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
29
30 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
31 writel(ha->eeprom_cmd_data, isp_nvram(ha));
32 readl(isp_nvram(ha));
33 return 1;
34}
35
36static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
37{
38 int i;
39 int mask;
40 int dataBit;
41 int previousBit;
42
43 /* Clock in a zero, then do the start bit. */
44 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
45 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
46 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
47 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
48 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
49 readl(isp_nvram(ha));
50 mask = 1 << (FM93C56A_CMD_BITS - 1);
51
52 /* Force the previous data bit to be different. */
53 previousBit = 0xffff;
54 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
55 dataBit =
56 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
57 if (previousBit != dataBit) {
58
59 /*
60 * If the bit changed, then change the DO state to
61 * match.
62 */
63 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
64 previousBit = dataBit;
65 }
66 writel(ha->eeprom_cmd_data | dataBit |
67 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
68 writel(ha->eeprom_cmd_data | dataBit |
69 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
70 readl(isp_nvram(ha));
71 cmd = cmd << 1;
72 }
73 mask = 1 << (eeprom_no_addr_bits(ha) - 1);
74
75 /* Force the previous data bit to be different. */
76 previousBit = 0xffff;
77 for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
78 dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
79 AUBURN_EEPROM_DO_0;
80 if (previousBit != dataBit) {
81 /*
82 * If the bit changed, then change the DO state to
83 * match.
84 */
85 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
86 previousBit = dataBit;
87 }
88 writel(ha->eeprom_cmd_data | dataBit |
89 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
90 writel(ha->eeprom_cmd_data | dataBit |
91 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
92 readl(isp_nvram(ha));
93 addr = addr << 1;
94 }
95 return 1;
96}
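
fm93c56a_cmd() bit-bangs the serial EEPROM interface: one start bit, then the 2-bit opcode, then the address, shifted out MSB-first on the DO line with one clock rise/fall per bit, toggling DO only when the next bit differs. The hedged userspace sketch below only serializes that same bit stream so the masking and shifting are easier to follow; the constants mirror ql4_nvram.h, and the printed bit vector is purely illustrative:

#include <stdio.h>

#define FM93C56A_READ            0x2
#define FM93C56A_CMD_BITS        2
#define FM93C86A_NO_ADDR_BITS_16 10	/* address width on the ISP4022 part */

/* Print the bit sequence that fm93c56a_cmd() clocks out on DO. */
static void serialize(int cmd, int addr, int addr_bits)
{
	int mask, i;

	printf("start : 1\nopcode: ");
	mask = 1 << (FM93C56A_CMD_BITS - 1);
	for (i = 0; i < FM93C56A_CMD_BITS; i++, cmd <<= 1)
		putchar((cmd & mask) ? '1' : '0');

	printf("\naddr  : ");
	mask = 1 << (addr_bits - 1);
	for (i = 0; i < addr_bits; i++, addr <<= 1)
		putchar((addr & mask) ? '1' : '0');
	putchar('\n');
}

int main(void)
{
	serialize(FM93C56A_READ, 0x7f, FM93C86A_NO_ADDR_BITS_16);
	return 0;
}
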
97
98static int fm93c56a_deselect(struct scsi_qla_host * ha)
99{
100 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
101 writel(ha->eeprom_cmd_data, isp_nvram(ha));
102 readl(isp_nvram(ha));
103 return 1;
104}
105
106static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
107{
108 int i;
109 int data = 0;
110 int dataBit;
111
112 /* Read the data bits
113 * The first bit is a dummy. Clock right over it. */
114 for (i = 0; i < eeprom_no_data_bits(ha); i++) {
115 writel(ha->eeprom_cmd_data |
116 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
117 writel(ha->eeprom_cmd_data |
118 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
119 dataBit =
120 (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
121 data = (data << 1) | dataBit;
122 }
123
124 *value = data;
125 return 1;
126}
127
128static int eeprom_readword(int eepromAddr, u16 * value,
129 struct scsi_qla_host * ha)
130{
131 fm93c56a_select(ha);
132 fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
133 fm93c56a_datain(ha, value);
134 fm93c56a_deselect(ha);
135 return 1;
136}
137
138/* Hardware_lock must be set before calling */
139u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
140{
141 u16 val;
142
143 /* NOTE: NVRAM uses half-word addresses */
144 eeprom_readword(offset, &val, ha);
145 return val;
146}
147
148int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
149{
150 int status = QLA_ERROR;
151 uint16_t checksum = 0;
152 uint32_t index;
153 unsigned long flags;
154
155 spin_lock_irqsave(&ha->hardware_lock, flags);
156 for (index = 0; index < eeprom_size(ha); index++)
157 checksum += rd_nvram_word(ha, index);
158 spin_unlock_irqrestore(&ha->hardware_lock, flags);
159
160 if (checksum == 0)
161 status = QLA_SUCCESS;
162
163 return status;
164}
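
The configuration is considered valid when the 16-bit additive checksum of every NVRAM word wraps to zero, i.e. the checksum word at the end of the image is programmed as the two's complement of the sum of the rest. A small standalone sketch of the same arithmetic (the sample image below is made up):

#include <stdio.h>
#include <stdint.h>

/* Return 1 when the 16-bit additive checksum of the image wraps to zero. */
static int nvram_valid(const uint16_t *words, unsigned int count)
{
	uint16_t sum = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		sum += words[i];
	return sum == 0;
}

int main(void)
{
	uint16_t image[4] = { 0x1234, 0x9356, 0x00ff, 0 };

	/* Program the final word so the total wraps to zero. */
	image[3] = (uint16_t)(0 - (image[0] + image[1] + image[2]));

	printf("valid=%d\n", nvram_valid(image, 4));
	return 0;
}
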
165
166/*************************************************************************
167 *
168 * Hardware Semaphore routines
169 *
170 *************************************************************************/
171int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
172{
173 uint32_t value;
174 unsigned long flags;
175 unsigned int seconds = 30;
176
177 DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
178 "0x%x\n", ha->host_no, sem_mask, sem_bits));
179 do {
180 spin_lock_irqsave(&ha->hardware_lock, flags);
181 writel((sem_mask | sem_bits), isp_semaphore(ha));
182 value = readw(isp_semaphore(ha));
183 spin_unlock_irqrestore(&ha->hardware_lock, flags);
184 if ((value & (sem_mask >> 16)) == sem_bits) {
185 DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
186 "code = 0x%x\n", ha->host_no,
187 sem_mask, sem_bits));
188 return QLA_SUCCESS;
189 }
190 ssleep(1);
191 } while (--seconds);
192 return QLA_ERROR;
193}
194
195void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
196{
197 unsigned long flags;
198
199 spin_lock_irqsave(&ha->hardware_lock, flags);
200 writel(sem_mask, isp_semaphore(ha));
201 readl(isp_semaphore(ha));
202 spin_unlock_irqrestore(&ha->hardware_lock, flags);
203
204 DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
205 sem_mask));
206}
207
208int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
209{
210 uint32_t value;
211 unsigned long flags;
212
213 spin_lock_irqsave(&ha->hardware_lock, flags);
214 writel((sem_mask | sem_bits), isp_semaphore(ha));
215 value = readw(isp_semaphore(ha));
216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
217 if ((value & (sem_mask >> 16)) == sem_bits) {
218 DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
219 "0x%x, sema code=0x%x\n", ha->host_no,
220 sem_mask, sem_bits, value));
221 return 1;
222 }
223 return 0;
224}
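
All three semaphore helpers share one convention, visible in the compare: the upper halfword of sem_mask, shifted down, selects the field of the semaphore register to test, and sem_bits is the ownership code expected there after the write. The sketch below models only that compare; the write-enable behaviour of the real register is an assumption here, and the mask/code values are examples, not the driver's constants:

#include <stdio.h>
#include <stdint.h>

static uint32_t sem_reg;	/* stands in for the ISP semaphore register */

/* Model of the write-then-verify step in ql4xxx_sem_lock(). */
static int try_lock(uint32_t sem_mask, uint32_t sem_bits)
{
	/* Assumed behaviour: the upper halfword acts as a write enable
	 * for the corresponding low bits. */
	sem_reg = (sem_reg & ~(sem_mask >> 16)) | sem_bits;

	/* This is the compare the driver performs on the read-back value. */
	return (sem_reg & (sem_mask >> 16)) == sem_bits;
}

int main(void)
{
	uint32_t mask = 0x00300000;	/* example: field at bits 4-5 */
	uint32_t bits = 0x00000010;	/* example ownership code */

	printf("locked=%d\n", try_lock(mask, bits));
	return 0;
}
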
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
new file mode 100644
index 00000000000..08e2aed8c6c
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -0,0 +1,256 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef _QL4XNVRM_H_
9#define _QL4XNVRM_H_
10
11/*
12 * FM93C56A/66A/86A serial EEPROM definitions
13 */
14#define FM93C56A_SIZE_8 0x100
15#define FM93C56A_SIZE_16 0x80
16#define FM93C66A_SIZE_8 0x200
17#define FM93C66A_SIZE_16 0x100 /* 4010 */
18#define FM93C86A_SIZE_16 0x400 /* 4022 */
19
20#define FM93C56A_START 0x1
21
22/* Commands */
23#define FM93C56A_READ 0x2
24#define FM93C56A_WEN 0x0
25#define FM93C56A_WRITE 0x1
26#define FM93C56A_WRITE_ALL 0x0
27#define FM93C56A_WDS 0x0
28#define FM93C56A_ERASE 0x3
29#define FM93C56A_ERASE_ALL 0x0
30
31/* Command Extensions */
32#define FM93C56A_WEN_EXT 0x3
33#define FM93C56A_WRITE_ALL_EXT 0x1
34#define FM93C56A_WDS_EXT 0x0
35#define FM93C56A_ERASE_ALL_EXT 0x2
36
37/* Address Bits */
38#define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */
39#define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */
40#define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */
41
42/* Data Bits */
43#define FM93C56A_DATA_BITS_16 16
44#define FM93C56A_DATA_BITS_8 8
45
46/* Special Bits */
47#define FM93C56A_READ_DUMMY_BITS 1
48#define FM93C56A_READY 0
49#define FM93C56A_BUSY 1
50#define FM93C56A_CMD_BITS 2
51
52/* Auburn Bits */
53#define AUBURN_EEPROM_DI 0x8
54#define AUBURN_EEPROM_DI_0 0x0
55#define AUBURN_EEPROM_DI_1 0x8
56#define AUBURN_EEPROM_DO 0x4
57#define AUBURN_EEPROM_DO_0 0x0
58#define AUBURN_EEPROM_DO_1 0x4
59#define AUBURN_EEPROM_CS 0x2
60#define AUBURN_EEPROM_CS_0 0x0
61#define AUBURN_EEPROM_CS_1 0x2
62#define AUBURN_EEPROM_CLK_RISE 0x1
63#define AUBURN_EEPROM_CLK_FALL 0x0
64
65/*
66 * EEPROM format
67 */
68struct bios_params {
69 uint16_t SpinUpDelay:1;
70 uint16_t BIOSDisable:1;
71 uint16_t MMAPEnable:1;
72 uint16_t BootEnable:1;
73 uint16_t Reserved0:12;
74 uint8_t bootID0:7;
75 uint8_t bootID0Valid:1;
76 uint8_t bootLUN0[8];
77 uint8_t bootID1:7;
78 uint8_t bootID1Valid:1;
79 uint8_t bootLUN1[8];
80 uint16_t MaxLunsPerTarget;
81 uint8_t Reserved1[10];
82};
83
84struct eeprom_port_cfg {
85
86 /* MTU MAC 0 */
87 u16 etherMtu_mac;
88
89 /* Flow Control MAC 0 */
90 u16 pauseThreshold_mac;
91 u16 resumeThreshold_mac;
92 u16 reserved[13];
93};
94
95struct eeprom_function_cfg {
96 u8 reserved[30];
97
98 /* MAC ADDR */
99 u8 macAddress[6];
100 u8 macAddressSecondary[6];
101 u16 subsysVendorId;
102 u16 subsysDeviceId;
103};
104
105struct eeprom_data {
106 union {
107 struct { /* isp4010 */
108 u8 asic_id[4]; /* x00 */
109 u8 version; /* x04 */
110 u8 reserved; /* x05 */
111 u16 board_id; /* x06 */
112#define EEPROM_BOARDID_ELDORADO 1
113#define EEPROM_BOARDID_PLACER 2
114
115#define EEPROM_SERIAL_NUM_SIZE 16
116 u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */
117
118 /* ExtHwConfig: */
119 /* Offset = 24bytes
120 *
121 * | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP | |
122 * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
123 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
124 */
125 u16 ext_hw_conf; /* x18 */
126 u8 mac0[6]; /* x1A */
127 u8 mac1[6]; /* x20 */
128 u8 mac2[6]; /* x26 */
129 u8 mac3[6]; /* x2C */
130 u16 etherMtu; /* x32 */
131 u16 macConfig; /* x34 */
132#define MAC_CONFIG_ENABLE_ANEG 0x0001
133#define MAC_CONFIG_ENABLE_PAUSE 0x0002
134 u16 phyConfig; /* x36 */
135#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
136#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
137 u16 topcat; /* x38 */
138#define TOPCAT_PRESENT 0x0100
139#define TOPCAT_MASK 0xFF00
140
141#define EEPROM_UNUSED_1_SIZE 2
142 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
143 u16 bufletSize; /* x3C */
144 u16 bufletCount; /* x3E */
145 u16 bufletPauseThreshold; /* x40 */
146 u16 tcpWindowThreshold50; /* x42 */
147 u16 tcpWindowThreshold25; /* x44 */
148 u16 tcpWindowThreshold0; /* x46 */
149 u16 ipHashTableBaseHi; /* x48 */
150 u16 ipHashTableBaseLo; /* x4A */
151 u16 ipHashTableSize; /* x4C */
152 u16 tcpHashTableBaseHi; /* x4E */
153 u16 tcpHashTableBaseLo; /* x50 */
154 u16 tcpHashTableSize; /* x52 */
155 u16 ncbTableBaseHi; /* x54 */
156 u16 ncbTableBaseLo; /* x56 */
157 u16 ncbTableSize; /* x58 */
158 u16 drbTableBaseHi; /* x5A */
159 u16 drbTableBaseLo; /* x5C */
160 u16 drbTableSize; /* x5E */
161
162#define EEPROM_UNUSED_2_SIZE 4
163 u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */
164 u16 ipReassemblyTimeout; /* x64 */
165 u16 tcpMaxWindowSizeHi; /* x66 */
166 u16 tcpMaxWindowSizeLo; /* x68 */
167 u32 net_ip_addr0; /* x6A Added for TOE
168 * functionality. */
169 u32 net_ip_addr1; /* x6E */
170 u32 scsi_ip_addr0; /* x72 */
171 u32 scsi_ip_addr1; /* x76 */
172#define EEPROM_UNUSED_3_SIZE 128 /* changed from 144 to account
173 * for ip addresses */
174 u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */
175 u16 subsysVendorId_f0; /* xFA */
176 u16 subsysDeviceId_f0; /* xFC */
177
178 /* Address = 0x7F */
179#define FM93C56A_SIGNATURE 0x9356
180#define FM93C66A_SIGNATURE 0x9366
181 u16 signature; /* xFE */
182
183#define EEPROM_UNUSED_4_SIZE 250
184 u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */
185 u16 subsysVendorId_f1; /* x1FA */
186 u16 subsysDeviceId_f1; /* x1FC */
187 u16 checksum; /* x1FE */
188 } __attribute__ ((packed)) isp4010;
189 struct { /* isp4022 */
190 u8 asicId[4]; /* x00 */
191 u8 version; /* x04 */
192 u8 reserved_5; /* x05 */
193 u16 boardId; /* x06 */
194 u8 boardIdStr[16]; /* x08 */
195 u8 serialNumber[16]; /* x18 */
196
197 /* External Hardware Configuration */
198 u16 ext_hw_conf; /* x28 */
199
200 /* MAC 0 CONFIGURATION */
201 struct eeprom_port_cfg macCfg_port0; /* x2A */
202
203 /* MAC 1 CONFIGURATION */
204 struct eeprom_port_cfg macCfg_port1; /* x4A */
205
206 /* DDR SDRAM Configuration */
207 u16 bufletSize; /* x6A */
208 u16 bufletCount; /* x6C */
209 u16 tcpWindowThreshold50; /* x6E */
210 u16 tcpWindowThreshold25; /* x70 */
211 u16 tcpWindowThreshold0; /* x72 */
212 u16 ipHashTableBaseHi; /* x74 */
213 u16 ipHashTableBaseLo; /* x76 */
214 u16 ipHashTableSize; /* x78 */
215 u16 tcpHashTableBaseHi; /* x7A */
216 u16 tcpHashTableBaseLo; /* x7C */
217 u16 tcpHashTableSize; /* x7E */
218 u16 ncbTableBaseHi; /* x80 */
219 u16 ncbTableBaseLo; /* x82 */
220 u16 ncbTableSize; /* x84 */
221 u16 drbTableBaseHi; /* x86 */
222 u16 drbTableBaseLo; /* x88 */
223 u16 drbTableSize; /* x8A */
224 u16 reserved_142[4]; /* x8C */
225
226 /* TCP/IP Parameters */
227 u16 ipReassemblyTimeout; /* x94 */
228 u16 tcpMaxWindowSize; /* x96 */
229 u16 ipSecurity; /* x98 */
230 u8 reserved_156[294]; /* x9A */
231 u16 qDebug[8]; /* QLOGIC USE ONLY x1C0 */
232 struct eeprom_function_cfg funcCfg_fn0; /* x1D0 */
233 u16 reserved_510; /* x1FE */
234
235 /* Address = 512 */
236 u8 oemSpace[432]; /* x200 */
237 struct bios_params sBIOSParams_fn1; /* x3B0 */
238 struct eeprom_function_cfg funcCfg_fn1; /* x3D0 */
239 u16 reserved_1022; /* x3FE */
240
241 /* Address = 1024 */
242 u8 reserved_1024[464]; /* x400 */
243 struct eeprom_function_cfg funcCfg_fn2; /* x5D0 */
244 u16 reserved_1534; /* x5FE */
245
246 /* Address = 1536 */
247 u8 reserved_1536[432]; /* x600 */
248 struct bios_params sBIOSParams_fn3; /* x7B0 */
249 struct eeprom_function_cfg funcCfg_fn3; /* x7D0 */
250 u16 checksum; /* x7FE */
251 } __attribute__ ((packed)) isp4022;
252 };
253};
254
255
256#endif /* _QL4XNVRM_H_ */
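
The offset comments put the isp4010 checksum at x1FE and the isp4022 checksum at x7FE, so the packed images should total 0x200 and 0x800 bytes, which matches the word counts eeprom_size() reports for the FM93C66A and FM93C86A parts. A trivial standalone check of that relationship (plain arithmetic, not driver code):

#include <assert.h>
#include <stdio.h>

#define FM93C66A_SIZE_16 0x100	/* isp4010: words */
#define FM93C86A_SIZE_16 0x400	/* isp4022: words */

int main(void)
{
	/* checksum at byte offset 0x1FE / 0x7FE => expected image sizes */
	assert(FM93C66A_SIZE_16 * 2 == 0x200);
	assert(FM93C86A_SIZE_16 * 2 == 0x800);
	printf("isp4010 image: %d bytes, isp4022 image: %d bytes\n",
	       FM93C66A_SIZE_16 * 2, FM93C86A_SIZE_16 * 2);
	return 0;
}
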
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
new file mode 100644
index 00000000000..5036ebf013a
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -0,0 +1,1755 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#include <linux/moduleparam.h>
8
9#include <scsi/scsi_tcq.h>
10#include <scsi/scsicam.h>
11
12#include "ql4_def.h"
13
14/*
15 * Driver version
16 */
17char qla4xxx_version_str[40];
18
19/*
20 * SRB allocation cache
21 */
22static kmem_cache_t *srb_cachep;
23
24/*
25 * Module parameter information and variables
26 */
27int ql4xdiscoverywait = 60;
28module_param(ql4xdiscoverywait, int, S_IRUGO | S_IRUSR);
29MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time");
30int ql4xdontresethba = 0;
31module_param(ql4xdontresethba, int, S_IRUGO | S_IRUSR);
32MODULE_PARM_DESC(ql4xdontresethba,
33 "Dont reset the HBA when the driver gets 0x8002 AEN "
34 " default it will reset hba :0"
35 " set to 1 to avoid resetting HBA");
36
37int extended_error_logging = 0; /* 0 = off, 1 = log errors */
38module_param(extended_error_logging, int, S_IRUGO | S_IRUSR);
39MODULE_PARM_DESC(extended_error_logging,
40 "Option to enable extended error logging, "
41 "Default is 0 - no logging, 1 - debug logging");
42
43/*
44 * SCSI host template entry points
45 */
46
47void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
48
49/*
50 * iSCSI template entry points
51 */
52static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
53 uint32_t enable, struct sockaddr *dst_addr);
54static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
55 enum iscsi_param param, char *buf);
56static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
57 enum iscsi_param param, char *buf);
58static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
59static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
60static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
61
62/*
63 * SCSI host template entry points
64 */
65static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
66 void (*done) (struct scsi_cmnd *));
67static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
68static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
69static int qla4xxx_slave_alloc(struct scsi_device *device);
70static int qla4xxx_slave_configure(struct scsi_device *device);
71static void qla4xxx_slave_destroy(struct scsi_device *sdev);
72
73static struct scsi_host_template qla4xxx_driver_template = {
74 .module = THIS_MODULE,
75 .name = DRIVER_NAME,
76 .proc_name = DRIVER_NAME,
77 .queuecommand = qla4xxx_queuecommand,
78
79 .eh_device_reset_handler = qla4xxx_eh_device_reset,
80 .eh_host_reset_handler = qla4xxx_eh_host_reset,
81
82 .slave_configure = qla4xxx_slave_configure,
83 .slave_alloc = qla4xxx_slave_alloc,
84 .slave_destroy = qla4xxx_slave_destroy,
85
86 .this_id = -1,
87 .cmd_per_lun = 3,
88 .use_clustering = ENABLE_CLUSTERING,
89 .sg_tablesize = SG_ALL,
90
91 .max_sectors = 0xFFFF,
92};
93
94static struct iscsi_transport qla4xxx_iscsi_transport = {
95 .owner = THIS_MODULE,
96 .name = DRIVER_NAME,
97 .param_mask = ISCSI_CONN_PORT |
98 ISCSI_CONN_ADDRESS |
99 ISCSI_TARGET_NAME |
100 ISCSI_TPGT,
101 .sessiondata_size = sizeof(struct ddb_entry),
102 .host_template = &qla4xxx_driver_template,
103
104 .tgt_dscvr = qla4xxx_tgt_dscvr,
105 .get_conn_param = qla4xxx_conn_get_param,
106 .get_session_param = qla4xxx_sess_get_param,
107 .start_conn = qla4xxx_conn_start,
108 .stop_conn = qla4xxx_conn_stop,
109 .session_recovery_timedout = qla4xxx_recovery_timedout,
110};
111
112static struct scsi_transport_template *qla4xxx_scsi_transport;
113
114static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
115{
116 struct ddb_entry *ddb_entry = session->dd_data;
117 struct scsi_qla_host *ha = ddb_entry->ha;
118
119 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) "
120 "secs exhausted, marking device DEAD.\n", ha->host_no,
121 __func__, ddb_entry->fw_ddb_index,
122 ha->port_down_retry_count));
123
124 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
125
126 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
127 "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
128 queue_work(ha->dpc_thread, &ha->dpc_work);
129}
130
131static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
132{
133 struct iscsi_cls_session *session;
134 struct ddb_entry *ddb_entry;
135
136 session = iscsi_dev_to_session(conn->dev.parent);
137 ddb_entry = session->dd_data;
138
139 DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
140 ddb_entry->ha->host_no, __func__,
141 ddb_entry->fw_ddb_index));
142 iscsi_unblock_session(session);
143 return 0;
144}
145
146static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
147{
148 struct iscsi_cls_session *session;
149 struct ddb_entry *ddb_entry;
150
151 session = iscsi_dev_to_session(conn->dev.parent);
152 ddb_entry = session->dd_data;
153
154 DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
155 ddb_entry->ha->host_no, __func__,
156 ddb_entry->fw_ddb_index));
157 if (flag == STOP_CONN_RECOVER)
158 iscsi_block_session(session);
159 else
160 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
161}
162
163static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
164 enum iscsi_param param, char *buf)
165{
166 struct ddb_entry *ddb_entry = sess->dd_data;
167 int len;
168
169 switch (param) {
170 case ISCSI_PARAM_TARGET_NAME:
171 len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
172 ddb_entry->iscsi_name);
173 break;
174 case ISCSI_PARAM_TPGT:
175 len = sprintf(buf, "%u\n", ddb_entry->tpgt);
176 break;
177 default:
178 return -ENOSYS;
179 }
180
181 return len;
182}
183
184static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
185 enum iscsi_param param, char *buf)
186{
187 struct iscsi_cls_session *session;
188 struct ddb_entry *ddb_entry;
189 int len;
190
191 session = iscsi_dev_to_session(conn->dev.parent);
192 ddb_entry = session->dd_data;
193
194 switch (param) {
195 case ISCSI_PARAM_CONN_PORT:
196 len = sprintf(buf, "%hu\n", ddb_entry->port);
197 break;
198 case ISCSI_PARAM_CONN_ADDRESS:
199 /* TODO: what are the ipv6 bits */
200 len = sprintf(buf, "%u.%u.%u.%u\n",
201 NIPQUAD(ddb_entry->ip_addr));
202 break;
203 default:
204 return -ENOSYS;
205 }
206
207 return len;
208}
209
210static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
211 uint32_t enable, struct sockaddr *dst_addr)
212{
213 struct scsi_qla_host *ha;
214 struct Scsi_Host *shost;
215 struct sockaddr_in *addr;
216 struct sockaddr_in6 *addr6;
217 int ret = 0;
218
219 shost = scsi_host_lookup(host_no);
220 if (IS_ERR(shost)) {
221 printk(KERN_ERR "Could not find host no %u\n", host_no);
222 return -ENODEV;
223 }
224
225 ha = (struct scsi_qla_host *) shost->hostdata;
226
227 switch (type) {
228 case ISCSI_TGT_DSCVR_SEND_TARGETS:
229 if (dst_addr->sa_family == AF_INET) {
230 addr = (struct sockaddr_in *)dst_addr;
231 if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
232 addr->sin_port) != QLA_SUCCESS)
233 ret = -EIO;
234 } else if (dst_addr->sa_family == AF_INET6) {
235 /*
236 * TODO: fix qla4xxx_send_tgts
237 */
238 addr6 = (struct sockaddr_in6 *)dst_addr;
239 if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
240 addr6->sin6_port) != QLA_SUCCESS)
241 ret = -EIO;
242 } else
243 ret = -ENOSYS;
244 break;
245 default:
246 ret = -ENOSYS;
247 }
248
249 scsi_host_put(shost);
250 return ret;
251}
252
253void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
254{
255 if (!ddb_entry->sess)
256 return;
257
258 if (ddb_entry->conn) {
259 iscsi_if_destroy_session_done(ddb_entry->conn);
260 iscsi_destroy_conn(ddb_entry->conn);
261 iscsi_remove_session(ddb_entry->sess);
262 }
263 iscsi_free_session(ddb_entry->sess);
264}
265
266int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
267{
268 int err;
269
270 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
271 if (err) {
272 DEBUG2(printk(KERN_ERR "Could not add session.\n"));
273 return err;
274 }
275
276 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
277 if (!ddb_entry->conn) {
278 iscsi_remove_session(ddb_entry->sess);
279 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
280 return -ENOMEM;
281 }
282
283 ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
284 iscsi_if_create_session_done(ddb_entry->conn);
285 return 0;
286}
287
288struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
289{
290 struct ddb_entry *ddb_entry;
291 struct iscsi_cls_session *sess;
292
293 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
294 if (!sess)
295 return NULL;
296
297 ddb_entry = sess->dd_data;
298 memset(ddb_entry, 0, sizeof(*ddb_entry));
299 ddb_entry->ha = ha;
300 ddb_entry->sess = sess;
301 return ddb_entry;
302}
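
qla4xxx_alloc_sess() leans on the transport's .sessiondata_size: iscsi_alloc_session() reserves sizeof(struct ddb_entry) of dd_data inside the class-session allocation, so the per-target state needs no separate allocation. A hedged, self-contained sketch of that trailing-private-data pattern; the demo_* names are illustrative and not the iSCSI transport API:

#include <stdio.h>
#include <stdlib.h>

struct demo_session {
	int id;
	unsigned char dd_data[];	/* driver-private area follows */
};

struct demo_ddb_entry {
	int fw_ddb_index;
};

/* Allocate a session with 'priv_size' bytes of driver-private storage. */
static struct demo_session *demo_alloc_session(size_t priv_size)
{
	return calloc(1, sizeof(struct demo_session) + priv_size);
}

int main(void)
{
	struct demo_session *sess;
	struct demo_ddb_entry *ddb;

	sess = demo_alloc_session(sizeof(struct demo_ddb_entry));
	if (!sess)
		return 1;

	ddb = (struct demo_ddb_entry *)sess->dd_data;	/* like sess->dd_data above */
	ddb->fw_ddb_index = 7;
	printf("ddb index %d lives inside session %p\n",
	       ddb->fw_ddb_index, (void *)sess);
	free(sess);
	return 0;
}
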
303
304/*
305 * Timer routines
306 */
307
308static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
309 unsigned long interval)
310{
311 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
312 __func__, ha->host->host_no));
313 init_timer(&ha->timer);
314 ha->timer.expires = jiffies + interval * HZ;
315 ha->timer.data = (unsigned long)ha;
316 ha->timer.function = (void (*)(unsigned long))func;
317 add_timer(&ha->timer);
318 ha->timer_active = 1;
319}
320
321static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
322{
323 del_timer_sync(&ha->timer);
324 ha->timer_active = 0;
325}
326
327/***
328 * qla4xxx_mark_device_missing - mark a device as missing.
329 * @ha: Pointer to host adapter structure.
330 * @ddb_entry: Pointer to device database entry
331 *
332 * This routine marks a device missing and resets the relogin retry count.
333 **/
334void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
335 struct ddb_entry *ddb_entry)
336{
337 atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
338 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
339 ha->host_no, ddb_entry->bus, ddb_entry->target,
340 ddb_entry->fw_ddb_index));
341 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
342}
343
344static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
345 struct ddb_entry *ddb_entry,
346 struct scsi_cmnd *cmd,
347 void (*done)(struct scsi_cmnd *))
348{
349 struct srb *srb;
350
351 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
352 if (!srb)
353 return srb;
354
355 atomic_set(&srb->ref_count, 1);
356 srb->ha = ha;
357 srb->ddb = ddb_entry;
358 srb->cmd = cmd;
359 srb->flags = 0;
360 cmd->SCp.ptr = (void *)srb;
361 cmd->scsi_done = done;
362
363 return srb;
364}
365
366static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
367{
368 struct scsi_cmnd *cmd = srb->cmd;
369
370 if (srb->flags & SRB_DMA_VALID) {
371 if (cmd->use_sg) {
372 pci_unmap_sg(ha->pdev, cmd->request_buffer,
373 cmd->use_sg, cmd->sc_data_direction);
374 } else if (cmd->request_bufflen) {
375 pci_unmap_single(ha->pdev, srb->dma_handle,
376 cmd->request_bufflen,
377 cmd->sc_data_direction);
378 }
379 srb->flags &= ~SRB_DMA_VALID;
380 }
381 cmd->SCp.ptr = NULL;
382}
383
384void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
385{
386 struct scsi_cmnd *cmd = srb->cmd;
387
388 qla4xxx_srb_free_dma(ha, srb);
389
390 mempool_free(srb, ha->srb_mempool);
391
392 cmd->scsi_done(cmd);
393}
394
395/**
396 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
397 * @cmd: Pointer to Linux's SCSI command structure
398 * @done: Function that the driver calls to notify the SCSI mid-layer
399 * that the command has been processed.
400 *
401 * Remarks:
402 * This routine is invoked by Linux to send a SCSI command to the driver.
403 * The mid-level driver tries to ensure that queuecommand never gets
404 * invoked concurrently with itself or the interrupt handler (although
405 * the interrupt handler may call this routine as part of request-
406 * completion handling). Unfortunately, it sometimes calls the scheduler
407 * in interrupt context, which is a big no-no.
408 **/
409static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
410 void (*done)(struct scsi_cmnd *))
411{
412 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
413 struct ddb_entry *ddb_entry = cmd->device->hostdata;
414 struct srb *srb;
415 int rval;
416
417 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
418 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
419 cmd->result = DID_NO_CONNECT << 16;
420 goto qc_fail_command;
421 }
422 goto qc_host_busy;
423 }
424
425 spin_unlock_irq(ha->host->host_lock);
426
427 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
428 if (!srb)
429 goto qc_host_busy_lock;
430
431 rval = qla4xxx_send_command_to_isp(ha, srb);
432 if (rval != QLA_SUCCESS)
433 goto qc_host_busy_free_sp;
434
435 spin_lock_irq(ha->host->host_lock);
436 return 0;
437
438qc_host_busy_free_sp:
439 qla4xxx_srb_free_dma(ha, srb);
440 mempool_free(srb, ha->srb_mempool);
441
442qc_host_busy_lock:
443 spin_lock_irq(ha->host->host_lock);
444
445qc_host_busy:
446 return SCSI_MLQUEUE_HOST_BUSY;
447
448qc_fail_command:
449 done(cmd);
450
451 return 0;
452}
453
454/**
455 * qla4xxx_mem_free - frees memory allocated to adapter
456 * @ha: Pointer to host adapter structure.
457 *
458 * Frees memory previously allocated by qla4xxx_mem_alloc
459 **/
460static void qla4xxx_mem_free(struct scsi_qla_host *ha)
461{
462 if (ha->queues)
463 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
464 ha->queues_dma);
465
466 ha->queues_len = 0;
467 ha->queues = NULL;
468 ha->queues_dma = 0;
469 ha->request_ring = NULL;
470 ha->request_dma = 0;
471 ha->response_ring = NULL;
472 ha->response_dma = 0;
473 ha->shadow_regs = NULL;
474 ha->shadow_regs_dma = 0;
475
476 /* Free srb pool. */
477 if (ha->srb_mempool)
478 mempool_destroy(ha->srb_mempool);
479
480 ha->srb_mempool = NULL;
481
482 /* release io space registers */
483 if (ha->reg)
484 iounmap(ha->reg);
485 pci_release_regions(ha->pdev);
486}
487
488/**
489 * qla4xxx_mem_alloc - allocates memory for use by adapter.
490 * @ha: Pointer to host adapter structure
491 *
492 * Allocates DMA memory for request and response queues. Also allocates memory
493 * for srbs.
494 **/
495static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
496{
497 unsigned long align;
498
499 /* Allocate contiguous block of DMA memory for queues. */
500 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
501 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
502 sizeof(struct shadow_regs) +
503 MEM_ALIGN_VALUE +
504 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
505 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
506 &ha->queues_dma, GFP_KERNEL);
507 if (ha->queues == NULL) {
508 dev_warn(&ha->pdev->dev,
509 "Memory Allocation failed - queues.\n");
510
511 goto mem_alloc_error_exit;
512 }
513 memset(ha->queues, 0, ha->queues_len);
514
515 /*
516 * As per RISC alignment requirements -- the bus-address must be a
517 * multiple of the request-ring size (in bytes).
518 */
519 align = 0;
520 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
521 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
522 (MEM_ALIGN_VALUE - 1));
523
524 /* Update request and response queue pointers. */
525 ha->request_dma = ha->queues_dma + align;
526 ha->request_ring = (struct queue_entry *) (ha->queues + align);
527 ha->response_dma = ha->queues_dma + align +
528 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
529 ha->response_ring = (struct queue_entry *) (ha->queues + align +
530 (REQUEST_QUEUE_DEPTH *
531 QUEUE_SIZE));
532 ha->shadow_regs_dma = ha->queues_dma + align +
533 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
534 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
535 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
536 (REQUEST_QUEUE_DEPTH *
537 QUEUE_SIZE) +
538 (RESPONSE_QUEUE_DEPTH *
539 QUEUE_SIZE));
540
541 /* Allocate memory for srb pool. */
542 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
543 mempool_free_slab, srb_cachep);
544 if (ha->srb_mempool == NULL) {
545 dev_warn(&ha->pdev->dev,
546 "Memory Allocation failed - SRB Pool.\n");
547
548 goto mem_alloc_error_exit;
549 }
550
551 return QLA_SUCCESS;
552
553mem_alloc_error_exit:
554 qla4xxx_mem_free(ha);
555 return QLA_ERROR;
556}
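
Two pieces of arithmetic in qla4xxx_mem_alloc() deserve a second look: the block length adds MEM_ALIGN_VALUE of slack and is then rounded up to a page multiple with the add-then-mask idiom, and align is the padding that brings the request ring's bus address onto a MEM_ALIGN_VALUE boundary. A standalone sketch of the same calculations with example constants (not the driver's real values):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_EX  4096u	/* example page size */
#define MEM_ALIGN_EX  64u	/* example alignment requirement */

int main(void)
{
	/* Round the payload (plus alignment slack) up to a page multiple. */
	uint32_t raw_len = 10000u + MEM_ALIGN_EX;
	uint32_t len = (raw_len + (PAGE_SIZE_EX - 1)) & ~(PAGE_SIZE_EX - 1);

	/* Pad the bus address up to the next MEM_ALIGN_EX boundary. */
	uint64_t dma_base = 0x12345678u;
	uint64_t align = 0;

	if (dma_base & (MEM_ALIGN_EX - 1))
		align = MEM_ALIGN_EX - (dma_base & (MEM_ALIGN_EX - 1));

	printf("rounded length = %u, padding = %llu, aligned base = 0x%llx\n",
	       len, (unsigned long long)align,
	       (unsigned long long)(dma_base + align));
	return 0;
}
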
557
558/**
559 * qla4xxx_timer - checks every second for work to do.
560 * @ha: Pointer to host adapter structure.
561 **/
562static void qla4xxx_timer(struct scsi_qla_host *ha)
563{
564 struct ddb_entry *ddb_entry, *dtemp;
565 int start_dpc = 0;
566
567 /* Search for relogin's to time-out and port down retry. */
568 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
569 /* Count down time between sending relogins */
570 if (adapter_up(ha) &&
571 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
572 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
573 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
574 INVALID_ENTRY) {
575 if (atomic_read(&ddb_entry->retry_relogin_timer)
576 == 0) {
577 atomic_set(&ddb_entry->
578 retry_relogin_timer,
579 INVALID_ENTRY);
580 set_bit(DPC_RELOGIN_DEVICE,
581 &ha->dpc_flags);
582 set_bit(DF_RELOGIN, &ddb_entry->flags);
583 DEBUG2(printk("scsi%ld: %s: index [%d]"
584 " login device\n",
585 ha->host_no, __func__,
586 ddb_entry->fw_ddb_index));
587 } else
588 atomic_dec(&ddb_entry->
589 retry_relogin_timer);
590 }
591 }
592
593 /* Wait for relogin to timeout */
594 if (atomic_read(&ddb_entry->relogin_timer) &&
595 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
596 /*
597 * If the relogin times out and the device is
598 * still NOT ONLINE then try and relogin again.
599 */
600 if (atomic_read(&ddb_entry->state) !=
601 DDB_STATE_ONLINE &&
602 ddb_entry->fw_ddb_device_state ==
603 DDB_DS_SESSION_FAILED) {
604 /* Reset retry relogin timer */
605 atomic_inc(&ddb_entry->relogin_retry_count);
606 DEBUG2(printk("scsi%ld: index[%d] relogin"
607 " timed out-retrying"
608 " relogin (%d)\n",
609 ha->host_no,
610 ddb_entry->fw_ddb_index,
611 atomic_read(&ddb_entry->
612 relogin_retry_count))
613 );
614 start_dpc++;
615 DEBUG(printk("scsi%ld:%d:%d: index [%d] "
616 "initate relogin after"
617 " %d seconds\n",
618 ha->host_no, ddb_entry->bus,
619 ddb_entry->target,
620 ddb_entry->fw_ddb_index,
621 ddb_entry->default_time2wait + 4)
622 );
623
624 atomic_set(&ddb_entry->retry_relogin_timer,
625 ddb_entry->default_time2wait + 4);
626 }
627 }
628 }
629
630 /* Check for heartbeat interval. */
631 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
632 ha->heartbeat_interval != 0) {
633 ha->seconds_since_last_heartbeat++;
634 if (ha->seconds_since_last_heartbeat >
635 ha->heartbeat_interval + 2)
636 set_bit(DPC_RESET_HA, &ha->dpc_flags);
637 }
638
639
640 /* Wakeup the dpc routine for this adapter, if needed. */
641 if ((start_dpc ||
642 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
643 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
644 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
645 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
646 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
647 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
648 test_bit(DPC_AEN, &ha->dpc_flags)) &&
649 ha->dpc_thread) {
650 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
651 " - dpc flags = 0x%lx\n",
652 ha->host_no, __func__, ha->dpc_flags));
653 queue_work(ha->dpc_thread, &ha->dpc_work);
654 }
655
656 /* Reschedule timer thread to call us back in one second */
657 mod_timer(&ha->timer, jiffies + HZ);
658
659 DEBUG2(ha->seconds_since_last_intr++);
660}
661
662/**
663 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
664 * @ha: Pointer to host adapter structure.
665 *
666 * This routine stalls the driver until all outstanding commands are returned.
667 * Caller must release the Hardware Lock prior to calling this routine.
668 **/
669static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
670{
671 uint32_t index = 0;
672 int stat = QLA_SUCCESS;
673 unsigned long flags;
674 struct scsi_cmnd *cmd;
675 int wait_cnt = WAIT_CMD_TOV; /*
676 * Initialized for 30 seconds as we
677 * expect all commands to be returned
678 * ASAP.
679 */
680
681 while (wait_cnt) {
682 spin_lock_irqsave(&ha->hardware_lock, flags);
683 /* Find a command that hasn't completed. */
684 for (index = 0; index < ha->host->can_queue; index++) {
685 cmd = scsi_host_find_tag(ha->host, index);
686 if (cmd != NULL)
687 break;
688 }
689 spin_unlock_irqrestore(&ha->hardware_lock, flags);
690
691 /* If No Commands are pending, wait is complete */
692 if (index == ha->host->can_queue) {
693 break;
694 }
695
696 /* If we timed out on waiting for commands to come back
697 * return ERROR.
698 */
699 wait_cnt--;
700 if (wait_cnt == 0)
701 stat = QLA_ERROR;
702 else {
703 msleep(1000);
704 }
705 } /* End of While (wait_cnt) */
706
707 return stat;
708}
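
qla4xxx_cmd_wait() is a plain poll-with-timeout loop: scan the host's tag space for any command still owned by the driver, and if one is found sleep a second and rescan until WAIT_CMD_TOV expires. The same shape, reduced to a hedged standalone sketch in which pending() stands in for the scsi_host_find_tag() scan:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for the tag scan: is anything still pending? */
static int pending(int *budget)
{
	return (*budget)-- > 0;	/* pretend the work drains after a few polls */
}

/* Returns 0 once everything drained, -1 if commands remained at timeout. */
static int wait_for_drain(unsigned int timeout_secs)
{
	int fake_work = 3;

	while (timeout_secs--) {
		if (!pending(&fake_work))
			return 0;
		sleep(1);	/* the driver uses msleep(1000) here */
	}
	return -1;
}

int main(void)
{
	printf("drained: %s\n", wait_for_drain(30) == 0 ? "yes" : "timed out");
	return 0;
}
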
709
710/**
711 * qla4010_soft_reset - performs soft reset.
712 * @ha: Pointer to host adapter structure.
713 **/
714static int qla4010_soft_reset(struct scsi_qla_host *ha)
715{
716 uint32_t max_wait_time;
717 unsigned long flags = 0;
718 int status = QLA_ERROR;
719 uint32_t ctrl_status;
720
721 spin_lock_irqsave(&ha->hardware_lock, flags);
722
723 /*
724 * If the SCSI Reset Interrupt bit is set, clear it.
725 * Otherwise, the Soft Reset won't work.
726 */
727 ctrl_status = readw(&ha->reg->ctrl_status);
728 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
729 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
730
731 /* Issue Soft Reset */
732 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
733 readl(&ha->reg->ctrl_status);
734
735 spin_unlock_irqrestore(&ha->hardware_lock, flags);
736
737 /* Wait until the Network Reset Intr bit is cleared */
738 max_wait_time = RESET_INTR_TOV;
739 do {
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741 ctrl_status = readw(&ha->reg->ctrl_status);
742 spin_unlock_irqrestore(&ha->hardware_lock, flags);
743
744 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
745 break;
746
747 msleep(1000);
748 } while ((--max_wait_time));
749
750 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
751 DEBUG2(printk(KERN_WARNING
752 "scsi%ld: Network Reset Intr not cleared by "
753 "Network function, clearing it now!\n",
754 ha->host_no));
755 spin_lock_irqsave(&ha->hardware_lock, flags);
756 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
757 readl(&ha->reg->ctrl_status);
758 spin_unlock_irqrestore(&ha->hardware_lock, flags);
759 }
760
761 /* Wait until the firmware tells us the Soft Reset is done */
762 max_wait_time = SOFT_RESET_TOV;
763 do {
764 spin_lock_irqsave(&ha->hardware_lock, flags);
765 ctrl_status = readw(&ha->reg->ctrl_status);
766 spin_unlock_irqrestore(&ha->hardware_lock, flags);
767
768 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
769 status = QLA_SUCCESS;
770 break;
771 }
772
773 msleep(1000);
774 } while ((--max_wait_time));
775
776 /*
777 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
778 * after the soft reset has taken place.
779 */
780 spin_lock_irqsave(&ha->hardware_lock, flags);
781 ctrl_status = readw(&ha->reg->ctrl_status);
782 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
783 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
784 readl(&ha->reg->ctrl_status);
785 }
786 spin_unlock_irqrestore(&ha->hardware_lock, flags);
787
788	/* If the soft reset fails, then most probably the BIOS on the
789	 * other function is also enabled.
790	 * Since initialization is sequential, the other function
791	 * won't be able to acknowledge the soft reset.
792	 * Issue a force soft reset to work around this scenario.
793	 */
794 if (max_wait_time == 0) {
795 /* Issue Force Soft Reset */
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
798 readl(&ha->reg->ctrl_status);
799 spin_unlock_irqrestore(&ha->hardware_lock, flags);
800 /* Wait until the firmware tells us the Soft Reset is done */
801 max_wait_time = SOFT_RESET_TOV;
802 do {
803 spin_lock_irqsave(&ha->hardware_lock, flags);
804 ctrl_status = readw(&ha->reg->ctrl_status);
805 spin_unlock_irqrestore(&ha->hardware_lock, flags);
806
807 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
808 status = QLA_SUCCESS;
809 break;
810 }
811
812 msleep(1000);
813 } while ((--max_wait_time));
814 }
815
816 return status;
817}
818
819/**
820 * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
821 * @ha: Pointer to host adapter structure.
822 **/
823static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
824{
825 unsigned long flags;
826
827 ql4xxx_lock_nvram(ha);
828 spin_lock_irqsave(&ha->hardware_lock, flags);
829 writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
830 readl(isp_gp_out(ha));
831 mdelay(1);
832
833 writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
834 readl(isp_gp_out(ha));
835 spin_unlock_irqrestore(&ha->hardware_lock, flags);
836 mdelay(2523);
837
838 ql4xxx_unlock_nvram(ha);
839 return QLA_SUCCESS;
840}
841
842/**
843 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
844 * @ha: Pointer to host adapter structure.
845 *
846 * This routine is called just prior to a HARD RESET to return all
847 * outstanding commands back to the Operating System.
848 * Caller should make sure that the following locks are released
849 * before calling this routine: Hardware lock and io_request_lock.
850 **/
851static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
852{
853 struct srb *srb;
854 int i;
855 unsigned long flags;
856
857 spin_lock_irqsave(&ha->hardware_lock, flags);
858 for (i = 0; i < ha->host->can_queue; i++) {
859 srb = qla4xxx_del_from_active_array(ha, i);
860 if (srb != NULL) {
861 srb->cmd->result = DID_RESET << 16;
862 qla4xxx_srb_compl(ha, srb);
863 }
864 }
865 spin_unlock_irqrestore(&ha->hardware_lock, flags);
866
867}
868
869/**
870 * qla4xxx_hard_reset - performs HBA Hard Reset
871 * @ha: Pointer to host adapter structure.
872 **/
873static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
874{
875 /* The QLA4010 really doesn't have an equivalent to a hard reset */
876 qla4xxx_flush_active_srbs(ha);
877 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
878 int status = QLA_ERROR;
879
880 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
881 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
882 (qla4010_soft_reset(ha) == QLA_SUCCESS))
883 status = QLA_SUCCESS;
884 return status;
885 } else
886 return qla4010_soft_reset(ha);
887}
888
889/**
890 * qla4xxx_recover_adapter - recovers adapter after a fatal error
891 * @ha: Pointer to host adapter structure.
892 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
893 * after adapter recovery has completed.
894 * 0=preserve ddb list, 1=destroy and rebuild ddb list
895 **/
896static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
897 uint8_t renew_ddb_list)
898{
899 int status;
900
901 /* Stall incoming I/O until we are done */
902 clear_bit(AF_ONLINE, &ha->flags);
903 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
904 __func__));
905
906 /* Wait for outstanding commands to complete.
907 * Stalls the driver for max 30 secs
908 */
909 status = qla4xxx_cmd_wait(ha);
910
911 qla4xxx_disable_intrs(ha);
912
913 /* Flush any pending ddb changed AENs */
914 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
915
916 /* Reset the firmware. If successful, function
917 * returns with ISP interrupts enabled.
918 */
919 if (status == QLA_SUCCESS) {
920 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
921 ha->host_no, __func__));
922 status = qla4xxx_soft_reset(ha);
923 }
924 /* FIXMEkaren: Do we want to keep interrupts enabled and process
925 AENs after soft reset */
926
927 /* If firmware (SOFT) reset failed, or if all outstanding
928 * commands have not returned, then do a HARD reset.
929 */
930 if (status == QLA_ERROR) {
931 DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
932 ha->host_no, __func__));
933 status = qla4xxx_hard_reset(ha);
934 }
935
936 /* Flush any pending ddb changed AENs */
937 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
938
939 /* Re-initialize firmware. If successful, function returns
940 * with ISP interrupts enabled */
941 if (status == QLA_SUCCESS) {
942 DEBUG2(printk("scsi%ld: %s - Initializing adapter..\n",
943 ha->host_no, __func__));
944
945 /* If successful, AF_ONLINE flag set in
946 * qla4xxx_initialize_adapter */
947 status = qla4xxx_initialize_adapter(ha, renew_ddb_list);
948 }
949
950 /* Failed adapter initialization?
951 * Retry reset_ha only if invoked via DPC (DPC_RESET_HA) */
952 if ((test_bit(AF_ONLINE, &ha->flags) == 0) &&
953 (test_bit(DPC_RESET_HA, &ha->dpc_flags))) {
954 /* Adapter initialization failed, see if we can retry
955 * resetting the ha */
956 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
957 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
958 DEBUG2(printk("scsi%ld: recover adapter - retrying "
959 "(%d) more times\n", ha->host_no,
960 ha->retry_reset_ha_cnt));
961 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
962 status = QLA_ERROR;
963 } else {
964 if (ha->retry_reset_ha_cnt > 0) {
965 /* Schedule another Reset HA--DPC will retry */
966 ha->retry_reset_ha_cnt--;
967 DEBUG2(printk("scsi%ld: recover adapter - "
968 "retry remaining %d\n",
969 ha->host_no,
970 ha->retry_reset_ha_cnt));
971 status = QLA_ERROR;
972 }
973
974 if (ha->retry_reset_ha_cnt == 0) {
975 /* Recover adapter retries have been exhausted.
976 * Adapter DEAD */
977 DEBUG2(printk("scsi%ld: recover adapter "
978 "failed - board disabled\n",
979 ha->host_no));
980 qla4xxx_flush_active_srbs(ha);
981 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
982 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
983 clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST,
984 &ha->dpc_flags);
985 status = QLA_ERROR;
986 }
987 }
988 } else {
989 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
990 clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags);
991 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
992 }
993
994 ha->adapter_error_count++;
995
996 if (status == QLA_SUCCESS)
997 qla4xxx_enable_intrs(ha);
998
999 DEBUG2(printk("scsi%ld: recover adapter .. DONE\n", ha->host_no));
1000 return status;
1001}
1002
1003/**
1004 * qla4xxx_do_dpc - dpc routine
1005 * @data: in our case pointer to adapter structure
1006 *
1007 * This routine is a task that is scheduled by the interrupt handler
1008 * to perform the background processing for interrupts. We put it
1009 * on a task queue that is consumed whenever the scheduler runs; that's
1010 * so you can do anything (i.e. put the process to sleep etc). In fact,
1011 * the mid-level tries to sleep when it reaches the driver threshold
1012 * "host->can_queue". This can cause a panic if we were in our interrupt code.
1013 **/
1014static void qla4xxx_do_dpc(void *data)
1015{
1016 struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
1017 struct ddb_entry *ddb_entry, *dtemp;
1018
1019 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
1020 ha->host_no, __func__));
1021
1022 DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
1023 ha->host_no, __func__, ha->flags));
1024 DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
1025 ha->host_no, __func__, ha->dpc_flags));
1026
1027 /* Initialization not yet finished. Don't do anything yet. */
1028 if (!test_bit(AF_INIT_DONE, &ha->flags))
1029 return;
1030
1031 if (adapter_up(ha) ||
1032 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1033 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1034 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
1035 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags))
1036 /*
1037 * dg 09/23 Never initialize ddb list
1038 * once we are up and running
1039 * qla4xxx_recover_adapter(ha,
1040 * REBUILD_DDB_LIST);
1041 */
1042 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1043
1044 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1045 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1046
1047 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1048 uint8_t wait_time = RESET_INTR_TOV;
1049 unsigned long flags = 0;
1050
1051 qla4xxx_flush_active_srbs(ha);
1052
1053 spin_lock_irqsave(&ha->hardware_lock, flags);
1054 while ((readw(&ha->reg->ctrl_status) &
1055 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
1056 if (--wait_time == 0)
1057 break;
1058
1059 spin_unlock_irqrestore(&ha->hardware_lock,
1060 flags);
1061
1062 msleep(1000);
1063
1064 spin_lock_irqsave(&ha->hardware_lock, flags);
1065 }
1066 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1067
1068 if (wait_time == 0)
1069 DEBUG2(printk("scsi%ld: %s: SR|FSR "
1070 "bit not cleared-- resetting\n",
1071 ha->host_no, __func__));
1072 }
1073 }
1074
1075 /* ---- process AEN? --- */
1076 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
1077 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
1078
1079 /* ---- Get DHCP IP Address? --- */
1080 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
1081 qla4xxx_get_dhcp_ip_address(ha);
1082
1083 /* ---- relogin device? --- */
1084 if (adapter_up(ha) &&
1085 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
1086 list_for_each_entry_safe(ddb_entry, dtemp,
1087 &ha->ddb_list, list) {
1088 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
1089 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
1090 qla4xxx_relogin_device(ha, ddb_entry);
1091
1092 /*
1093 * If mbx cmd times out there is no point
1094 * in continuing further.
1095 * With large no of targets this can hang
1096 * the system.
1097 */
1098 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1099 printk(KERN_WARNING "scsi%ld: %s: "
1100 "need to reset hba\n",
1101 ha->host_no, __func__);
1102 break;
1103 }
1104 }
1105 }
1106}
1107
1108/**
1109 * qla4xxx_free_adapter - release the adapter
1110 * @ha: pointer to adapter structure
1111 **/
1112static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1113{
1114
1115 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
1116 /* Turn-off interrupts on the card. */
1117 qla4xxx_disable_intrs(ha);
1118 }
1119
1120 /* Kill the kernel thread for this host */
1121 if (ha->dpc_thread)
1122 destroy_workqueue(ha->dpc_thread);
1123
1124 /* Issue Soft Reset to put firmware in unknown state */
1125 qla4xxx_soft_reset(ha);
1126
1127 /* Remove timer thread, if present */
1128 if (ha->timer_active)
1129 qla4xxx_stop_timer(ha);
1130
1131 /* free extra memory */
1132 qla4xxx_mem_free(ha);
1133
1134 /* Detach interrupts */
1135 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1136 free_irq(ha->pdev->irq, ha);
1137
1138 pci_disable_device(ha->pdev);
1139
1140}
1141
1142/***
1143 * qla4xxx_iospace_config - maps registers
1144 * @ha: pointer to adapter structure
1145 *
1146 * This routine maps the HBA's registers from the PCI address space
1147 * into the kernel virtual address space for memory mapped i/o.
1148 **/
1149static int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1150{
1151 unsigned long pio, pio_len, pio_flags;
1152 unsigned long mmio, mmio_len, mmio_flags;
1153
1154 pio = pci_resource_start(ha->pdev, 0);
1155 pio_len = pci_resource_len(ha->pdev, 0);
1156 pio_flags = pci_resource_flags(ha->pdev, 0);
1157 if (pio_flags & IORESOURCE_IO) {
1158 if (pio_len < MIN_IOBASE_LEN) {
1159 dev_warn(&ha->pdev->dev,
1160 "Invalid PCI I/O region size\n");
1161 pio = 0;
1162 }
1163 } else {
1164 dev_warn(&ha->pdev->dev, "region #0 not a PIO resource\n");
1165 pio = 0;
1166 }
1167
1168 /* Use MMIO operations for all accesses. */
1169 mmio = pci_resource_start(ha->pdev, 1);
1170 mmio_len = pci_resource_len(ha->pdev, 1);
1171 mmio_flags = pci_resource_flags(ha->pdev, 1);
1172
1173 if (!(mmio_flags & IORESOURCE_MEM)) {
1174 dev_err(&ha->pdev->dev,
1175 "region #0 not an MMIO resource, aborting\n");
1176
1177 goto iospace_error_exit;
1178 }
1179 if (mmio_len < MIN_IOBASE_LEN) {
1180 dev_err(&ha->pdev->dev,
1181 "Invalid PCI mem region size, aborting\n");
1182 goto iospace_error_exit;
1183 }
1184
1185 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
1186 dev_warn(&ha->pdev->dev,
1187 "Failed to reserve PIO/MMIO regions\n");
1188
1189 goto iospace_error_exit;
1190 }
1191
1192 ha->pio_address = pio;
1193 ha->pio_length = pio_len;
1194 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
1195 if (!ha->reg) {
1196 dev_err(&ha->pdev->dev,
1197 "cannot remap MMIO, aborting\n");
1198
1199 goto iospace_error_exit;
1200 }
1201
1202 return 0;
1203
1204iospace_error_exit:
1205 return -ENOMEM;
1206}
1207
1208/**
1209 * qla4xxx_probe_adapter - callback function to probe HBA
1210 * @pdev: pointer to pci_dev structure
1211 * @ent: pointer to pci_device_id entry
1212 *
1213 * This routine will probe for QLogic 4xxx iSCSI host adapters.
1214 * It returns zero if successful. It also initializes all data necessary for
1215 * the driver.
1216 **/
1217static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1218 const struct pci_device_id *ent)
1219{
1220 int ret = -ENODEV, status;
1221 struct Scsi_Host *host;
1222 struct scsi_qla_host *ha;
1223 struct ddb_entry *ddb_entry, *ddbtemp;
1224 uint8_t init_retry_count = 0;
1225 char buf[34];
1226
1227 if (pci_enable_device(pdev))
1228 return -1;
1229
1230 host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
1231 if (host == NULL) {
1232 printk(KERN_WARNING
1233 "qla4xxx: Couldn't allocate host from scsi layer!\n");
1234 goto probe_disable_device;
1235 }
1236
1237 /* Clear our data area */
1238 ha = (struct scsi_qla_host *) host->hostdata;
1239 memset(ha, 0, sizeof(*ha));
1240
1241 /* Save the information from PCI BIOS. */
1242 ha->pdev = pdev;
1243 ha->host = host;
1244 ha->host_no = host->host_no;
1245
1246 /* Configure PCI I/O space. */
1247 ret = qla4xxx_iospace_config(ha);
1248 if (ret)
1249 goto probe_failed;
1250
1251 dev_info(&ha->pdev->dev, "Found an ISP%04x, irq %d, iobase 0x%p\n",
1252 pdev->device, pdev->irq, ha->reg);
1253
1254 qla4xxx_config_dma_addressing(ha);
1255
1256 /* Initialize lists and spinlocks. */
1257 INIT_LIST_HEAD(&ha->ddb_list);
1258 INIT_LIST_HEAD(&ha->free_srb_q);
1259
1260 mutex_init(&ha->mbox_sem);
1261 init_waitqueue_head(&ha->mailbox_wait_queue);
1262
1263 spin_lock_init(&ha->hardware_lock);
1264 spin_lock_init(&ha->list_lock);
1265
1266 /* Allocate dma buffers */
1267 if (qla4xxx_mem_alloc(ha)) {
1268 dev_warn(&ha->pdev->dev,
1269 "[ERROR] Failed to allocate memory for adapter\n");
1270
1271 ret = -ENOMEM;
1272 goto probe_failed;
1273 }
1274
1275 /*
1276 * Initialize the Host adapter request/response queues and
1277 * firmware
1278 * NOTE: interrupts enabled upon successful completion
1279 */
1280 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1281 while (status == QLA_ERROR && init_retry_count++ < MAX_INIT_RETRIES) {
1282 DEBUG2(printk("scsi: %s: retrying adapter initialization "
1283 "(%d)\n", __func__, init_retry_count));
1284 qla4xxx_soft_reset(ha);
1285 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1286 }
1287 if (status == QLA_ERROR) {
1288 dev_warn(&ha->pdev->dev, "Failed to initialize adapter\n");
1289
1290 ret = -ENODEV;
1291 goto probe_failed;
1292 }
1293
1294 host->cmd_per_lun = 3;
1295 host->max_channel = 0;
1296 host->max_lun = MAX_LUNS - 1;
1297 host->max_id = MAX_TARGETS;
1298 host->max_cmd_len = IOCB_MAX_CDB_LEN;
1299 host->can_queue = MAX_SRBS ;
1300 host->transportt = qla4xxx_scsi_transport;
1301
1302 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
1303 if (ret) {
1304 dev_warn(&ha->pdev->dev, "scsi_init_shared_tag_map failed");
1305 goto probe_failed;
1306 }
1307
1308 /* Startup the kernel thread for this host adapter. */
1309 DEBUG2(printk("scsi: %s: Starting kernel thread for "
1310 "qla4xxx_dpc\n", __func__));
1311 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
1312 ha->dpc_thread = create_singlethread_workqueue(buf);
1313 if (!ha->dpc_thread) {
1314 dev_warn(&ha->pdev->dev, "Unable to start DPC thread!\n");
1315 ret = -ENODEV;
1316 goto probe_failed;
1317 }
1318 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
1319
1320 ret = request_irq(pdev->irq, qla4xxx_intr_handler,
1321 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
1322 if (ret) {
1323		dev_warn(&ha->pdev->dev, "Failed to reserve interrupt %d;"
1324			 " already in use.\n", pdev->irq);
1325 goto probe_failed;
1326 }
1327 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1328 host->irq = pdev->irq;
1329 DEBUG(printk("scsi%d: irq %d attached\n", ha->host_no, ha->pdev->irq));
1330
1331 qla4xxx_enable_intrs(ha);
1332
1333 /* Start timer thread. */
1334 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
1335
1336 set_bit(AF_INIT_DONE, &ha->flags);
1337
1338 pci_set_drvdata(pdev, ha);
1339
1340 ret = scsi_add_host(host, &pdev->dev);
1341 if (ret)
1342 goto probe_failed;
1343
1344 /* Update transport device information for all devices. */
1345 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
1346 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
1347 if (qla4xxx_add_sess(ddb_entry))
1348 goto remove_host;
1349 }
1350
1351 printk(KERN_INFO
1352 " QLogic iSCSI HBA Driver version: %s\n"
1353 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
1354 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
1355 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
1356 ha->patch_number, ha->build_number);
1357
1358 return 0;
1359
1360remove_host:
1361 qla4xxx_free_ddb_list(ha);
1362 scsi_remove_host(host);
1363
1364probe_failed:
1365 qla4xxx_free_adapter(ha);
1366 scsi_host_put(ha->host);
1367
1368probe_disable_device:
1369 pci_disable_device(pdev);
1370
1371 return ret;
1372}
1373
1374/**
1375 * qla4xxx_remove_adapter - callback function to remove adapter.
1376 * @pdev: PCI device pointer
1377 **/
1378static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1379{
1380 struct scsi_qla_host *ha;
1381
1382 ha = pci_get_drvdata(pdev);
1383
1384 /* remove devs from iscsi_sessions to scsi_devices */
1385 qla4xxx_free_ddb_list(ha);
1386
1387 scsi_remove_host(ha->host);
1388
1389 qla4xxx_free_adapter(ha);
1390
1391 scsi_host_put(ha->host);
1392
1393 pci_set_drvdata(pdev, NULL);
1394}
1395
1396/**
1397 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
1398 * @ha: HA context
1399 *
1400 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
1401 * supported addressing method.
1402 */
1403void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
1404{
1405 int retval;
1406
1407 /* Update our PCI device dma_mask for full 64 bit mask */
1408 if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
1409 if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
1410 dev_dbg(&ha->pdev->dev,
1411 "Failed to set 64 bit PCI consistent mask; "
1412 "using 32 bit.\n");
1413 retval = pci_set_consistent_dma_mask(ha->pdev,
1414 DMA_32BIT_MASK);
1415 }
1416 } else
1417 retval = pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
1418}
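
qla4xxx_config_dma_addressing() tries the 64-bit DMA mask first and quietly falls back to 32-bit if either the streaming or the consistent mask is refused. The decision logic, lifted into a hedged sketch with stubbed mask setters (the stubs model a 32-bit-only platform and are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define DMA_64BIT 0xffffffffffffffffULL
#define DMA_32BIT 0x00000000ffffffffULL

/* Stubs: pretend the platform only supports 32-bit DMA (0 == success). */
static int set_dma_mask(uint64_t mask)
{
	return mask <= DMA_32BIT ? 0 : -1;
}

static int set_consistent_dma_mask(uint64_t mask)
{
	return mask <= DMA_32BIT ? 0 : -1;
}

int main(void)
{
	if (set_dma_mask(DMA_64BIT) == 0) {
		if (set_consistent_dma_mask(DMA_64BIT) != 0) {
			printf("64-bit consistent mask rejected; using 32-bit\n");
			set_consistent_dma_mask(DMA_32BIT);
		}
	} else {
		printf("64-bit streaming mask rejected; using 32-bit\n");
		set_dma_mask(DMA_32BIT);
	}
	return 0;
}
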
1419
1420static int qla4xxx_slave_alloc(struct scsi_device *sdev)
1421{
1422 struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
1423 struct ddb_entry *ddb = sess->dd_data;
1424
1425 sdev->hostdata = ddb;
1426 sdev->tagged_supported = 1;
1427 scsi_activate_tcq(sdev, sdev->host->can_queue);
1428 return 0;
1429}
1430
1431static int qla4xxx_slave_configure(struct scsi_device *sdev)
1432{
1433 sdev->tagged_supported = 1;
1434 return 0;
1435}
1436
1437static void qla4xxx_slave_destroy(struct scsi_device *sdev)
1438{
1439 scsi_deactivate_tcq(sdev, 1);
1440}
1441
1442/**
1443 * qla4xxx_del_from_active_array - returns an active srb
1444 * @ha: Pointer to host adapter structure.
1445 * @index: index into the active_array
1446 *
1447 * This routine removes and returns the srb at the specified index
1448 **/
1449struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
1450{
1451 struct srb *srb = NULL;
1452 struct scsi_cmnd *cmd;
1453
1454 if (!(cmd = scsi_host_find_tag(ha->host, index)))
1455 return srb;
1456
1457 if (!(srb = (struct srb *)cmd->host_scribble))
1458 return srb;
1459
1460 /* update counters */
1461 if (srb->flags & SRB_DMA_VALID) {
1462 ha->req_q_count += srb->iocb_cnt;
1463 ha->iocb_cnt -= srb->iocb_cnt;
1464 if (srb->cmd)
1465 srb->cmd->host_scribble = NULL;
1466 }
1467 return srb;
1468}
1469
1470/**
1471 * qla4xxx_soft_reset - performs a SOFT RESET of hba.
1472 * @ha: Pointer to host adapter structure.
1473 **/
1474int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1475{
1476
1477 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
1478 __func__));
1479 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
1480 int status = QLA_ERROR;
1481
1482 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
1483 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
1484 (qla4010_soft_reset(ha) == QLA_SUCCESS))
1485 status = QLA_SUCCESS;
1486 return status;
1487 } else
1488 return qla4010_soft_reset(ha);
1489}
1490
1491/**
1492 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
1493 * @ha: Host adapter whose done queue will contain the command returned by firmware.
1494 * @cmd: Scsi Command to wait on.
1495 *
1496 * This routine waits for the command to be returned by the firmware,
1497 * up to a maximum wait time.
1498 **/
1499static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
1500 struct scsi_cmnd *cmd)
1501{
1502 int done = 0;
1503 struct srb *rp;
1504 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
1505
1506 do {
1507 /* Check whether the command has been returned to the OS. */
1508 rp = (struct srb *) cmd->SCp.ptr;
1509 if (rp == NULL) {
1510 done++;
1511 break;
1512 }
1513
1514 msleep(2000);
1515 } while (max_wait_time--);
1516
1517 return done;
1518}
1519
1520/**
1521 * qla4xxx_wait_for_hba_online - waits for HBA to come online
1522 * @ha: Pointer to host adapter structure
1523 **/
1524static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
1525{
1526 unsigned long wait_online;
1527
1528 wait_online = jiffies + (30 * HZ);
1529 while (time_before(jiffies, wait_online)) {
1530
1531 if (adapter_up(ha))
1532 return QLA_SUCCESS;
1533 else if (ha->retry_reset_ha_cnt == 0)
1534 return QLA_ERROR;
1535
1536 msleep(2000);
1537 }
1538
1539 return QLA_ERROR;
1540}
1541
1542/**
1543 * qla4xxx_eh_wait_for_active_target_commands - wait for active cmds to finish.
1544 * @ha: Pointer to the HBA
1545 * @t: target id
1546 * @l: lun id
1547 *
1548 * This function waits for all outstanding commands to a lun to complete. It
1549 * returns 0 if all pending commands are returned and 1 otherwise.
1550 **/
1551static int qla4xxx_eh_wait_for_active_target_commands(struct scsi_qla_host *ha,
1552 int t, int l)
1553{
1554 int cnt;
1555 int status = 0;
1556 struct scsi_cmnd *cmd;
1557
1558 /*
1559 * Wait for all outstanding commands for the designated target and lun
1560 * in the active array to complete.
1561 */
1562 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
1563 cmd = scsi_host_find_tag(ha->host, cnt);
1564 if (cmd && cmd->device->id == t && cmd->device->lun == l) {
1565 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
1566 status++;
1567 break;
1568 }
1569 }
1570 }
1571 return status;
1572}
1573
1574/**
1575 * qla4xxx_eh_device_reset - callback for a device (LUN) reset.
1576 * @cmd: Pointer to Linux's SCSI command structure
1577 *
1578 * This routine is called by the Linux OS to reset the LUN addressed by
1579 * the specified command.
1580 **/
1581static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1582{
1583 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
1584 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1585 struct srb *sp;
1586 int ret = FAILED, stat;
1587
1588 sp = (struct srb *) cmd->SCp.ptr;
1589 if (!sp || !ddb_entry)
1590 return ret;
1591
1592 dev_info(&ha->pdev->dev,
1593 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
1594 cmd->device->channel, cmd->device->id, cmd->device->lun);
1595
1596 DEBUG2(printk(KERN_INFO
1597 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
1598 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
1599 cmd, jiffies, cmd->timeout_per_command / HZ,
1600 ha->dpc_flags, cmd->result, cmd->allowed));
1601
1602 /* FIXME: wait for hba to go online */
1603 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
1604 if (stat != QLA_SUCCESS) {
1605 dev_info(&ha->pdev->dev, "DEVICE RESET FAILED. %d\n", stat);
1606 goto eh_dev_reset_done;
1607 }
1608
1609 /* Send marker. */
1610 ha->marker_needed = 1;
1611
1612 /*
1613 * If we are coming down the EH path, wait for all commands to complete
1614 * for the device.
1615 */
1616 if (cmd->device->host->shost_state == SHOST_RECOVERY) {
1617 if (qla4xxx_eh_wait_for_active_target_commands(ha,
1618 cmd->device->id,
1619 cmd->device->lun)){
1620 dev_info(&ha->pdev->dev,
1621 "DEVICE RESET FAILED - waiting for "
1622 "commands.\n");
1623 goto eh_dev_reset_done;
1624 }
1625 }
1626
1627 dev_info(&ha->pdev->dev,
1628 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
1629 ha->host_no, cmd->device->channel, cmd->device->id,
1630 cmd->device->lun);
1631
1632 ret = SUCCESS;
1633
1634eh_dev_reset_done:
1635
1636 return ret;
1637}
1638
1639/**
1640 * qla4xxx_eh_host_reset - kernel callback for an adapter (host) reset.
1641 * @cmd: Pointer to Linux's SCSI command structure
1642 *
1643 * This routine is invoked by the Linux kernel to perform fatal error
1644 * recovery on the specified adapter.
1645 **/
1646static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1647{
1648 int return_status = FAILED;
1649 struct scsi_qla_host *ha;
1650
1651 ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
1652
1653 dev_info(&ha->pdev->dev,
1654 "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no,
1655 cmd->device->channel, cmd->device->id, cmd->device->lun);
1656
1657 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
1658 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
1659 "DEAD.\n", ha->host_no, cmd->device->channel,
1660 __func__));
1661
1662 return FAILED;
1663 }
1664
1665 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) {
1666 return_status = SUCCESS;
1667 }
1668
1669 dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
1670 return_status == FAILED ? "FAILED" : "SUCCEEDED");
1671
1672 return return_status;
1673}
1674
1675
1676static struct pci_device_id qla4xxx_pci_tbl[] = {
1677 {
1678 .vendor = PCI_VENDOR_ID_QLOGIC,
1679 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
1680 .subvendor = PCI_ANY_ID,
1681 .subdevice = PCI_ANY_ID,
1682 },
1683 {
1684 .vendor = PCI_VENDOR_ID_QLOGIC,
1685 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
1686 .subvendor = PCI_ANY_ID,
1687 .subdevice = PCI_ANY_ID,
1688 },
1689 {0, 0},
1690};
1691MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
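The same ID table could be written more compactly with the PCI_DEVICE() helper from <linux/pci.h>, which fills .subvendor/.subdevice with PCI_ANY_ID. A sketch only (hypothetical table name); the patch keeps the explicit initializers:

/* Sketch: equivalent, more compact ID table using PCI_DEVICE(). */
static struct pci_device_id qla4xxx_pci_tbl_sketch[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP4010) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP4022) },
	{ },		/* terminating entry */
};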
1692
1693struct pci_driver qla4xxx_pci_driver = {
1694 .name = DRIVER_NAME,
1695 .id_table = qla4xxx_pci_tbl,
1696 .probe = qla4xxx_probe_adapter,
1697 .remove = __devexit_p(qla4xxx_remove_adapter),
1698};
1699
1700static int __init qla4xxx_module_init(void)
1701{
1702 int ret;
1703
1704 /* Allocate cache for SRBs. */
1705 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
1706 SLAB_HWCACHE_ALIGN, NULL, NULL);
1707 if (srb_cachep == NULL) {
1708 printk(KERN_ERR
1709 "%s: Unable to allocate SRB cache..."
1710 "Failing load!\n", DRIVER_NAME);
1711 ret = -ENOMEM;
1712 goto no_srb_cache;
1713 }
1714
1715 /* Derive version string. */
1716 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
1717 if (extended_error_logging)
1718 strcat(qla4xxx_version_str, "-debug");
1719
1720 qla4xxx_scsi_transport =
1721 iscsi_register_transport(&qla4xxx_iscsi_transport);
1722 if (!qla4xxx_scsi_transport) {
1723 ret = -ENODEV;
1724 goto release_srb_cache;
1725 }
1726
1728 ret = pci_register_driver(&qla4xxx_pci_driver);
1729 if (ret)
1730 goto unregister_transport;
1731
1732 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
1733 return 0;
1734unregister_transport:
1735 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
1736release_srb_cache:
1737 kmem_cache_destroy(srb_cachep);
1738no_srb_cache:
1739 return ret;
1740}
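On kernels that provide the KMEM_CACHE() helper, the SRB cache allocation above could omit the explicit size and alignment; this is only a sketch of that alternative (hypothetical helper name), not what the patch uses:

/*
 * Sketch: KMEM_CACHE() derives the cache name, object size and alignment
 * from the type; available only on later kernels.
 */
static struct kmem_cache *qla4xxx_create_srb_cache_sketch(void)
{
	return KMEM_CACHE(srb, SLAB_HWCACHE_ALIGN);
}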
1741
1742static void __exit qla4xxx_module_exit(void)
1743{
1744 pci_unregister_driver(&qla4xxx_pci_driver);
1745 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
1746 kmem_cache_destroy(srb_cachep);
1747}
1748
1749module_init(qla4xxx_module_init);
1750module_exit(qla4xxx_module_exit);
1751
1752MODULE_AUTHOR("QLogic Corporation");
1753MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
1754MODULE_LICENSE("GPL");
1755MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
new file mode 100644
index 00000000000..b3fe7e68988
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -0,0 +1,13 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k"
9
10#define QL4_DRIVER_MAJOR_VER 5
11#define QL4_DRIVER_MINOR_VER 0
12#define QL4_DRIVER_PATCH_VER 5
13#define QL4_DRIVER_BETA_VER 9