author | Anirban Chakraborty <anirban.chakraborty@qlogic.com> | 2008-11-06 13:40:51 -0500
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2008-12-29 12:24:16 -0500
commit | e315cd28b9ef0d7b71e462ac16e18dbaa2f5adfe (patch)
tree | 1e20bdd40b56b36f211bde8fff0c63792b088a0a /drivers/scsi/qla2xxx/qla_isr.c
parent | 7b867cf76fbcc8d77867cbec6f509f71dce8a98f (diff)
[SCSI] qla2xxx: Code changes for qla data structure refactoring
The following changes have been made:
1. Outstanding commands are now tracked on the request queue;
scsi_qla_host no longer maintains them.
2. start_scsi is invoked through the isp_ops struct instead of being
called directly.
3. Interrupt registration is done using the response queue instead of
the device id.
Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
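
The core of the refactor is visible throughout the diff below: the old all-in-one scsi_qla_host_t context is split into a per-port `scsi_qla_host` ("vha") and a hardware-wide `qla_hw_data` ("ha"), and the interrupt handlers now receive a `struct rsp_que *` as their dev_id cookie (point 3 above). Below is a minimal, self-contained sketch of that ownership chain. The struct layouts and the `base_vha` member are simplified stand-ins, not the real driver definitions — in the diff the port is resolved via qla2x00_get_rsp_host(rsp), while `ha = rsp->hw` is taken directly from the handlers.

```c
#include <stdio.h>

struct qla_hw_data;                      /* hardware-wide (per-HBA) state   */

struct rsp_que {                         /* response queue: the new cookie  */
	struct qla_hw_data *hw;          /* back-pointer, as in "ha = rsp->hw" */
};

struct scsi_qla_host {                   /* per-(virtual)-port state: "vha" */
	struct qla_hw_data *hw;
	unsigned long host_no;
};

struct qla_hw_data {
	struct rsp_que *rsp;
	struct scsi_qla_host *base_vha;  /* stand-in for qla2x00_get_rsp_host() */
};

/* Shape of the reworked handlers: dev_id is the response queue. */
static void sketch_intr_handler(void *dev_id)
{
	struct rsp_que *rsp = dev_id;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = ha->base_vha;

	printf("servicing scsi(%lu)\n", vha->host_no);
}

int main(void)
{
	struct scsi_qla_host vha = { .host_no = 0 };
	struct rsp_que rsp;
	struct qla_hw_data hw = { .rsp = &rsp, .base_vha = &vha };

	vha.hw = &hw;
	rsp.hw = &hw;
	sketch_intr_handler(&rsp);   /* request_irq() would pass &rsp here */
	return 0;
}
```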
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_isr.c')
-rw-r--r-- | drivers/scsi/qla2xxx/qla_isr.c | 633
1 file changed, 351 insertions, 282 deletions
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a76efd99d007..89d327117aa8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -14,6 +14,7 @@ static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); | |||
14 | static void qla2x00_status_entry(scsi_qla_host_t *, void *); | 14 | static void qla2x00_status_entry(scsi_qla_host_t *, void *); |
15 | static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); | 15 | static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); |
16 | static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); | 16 | static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); |
17 | static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *); | ||
17 | 18 | ||
18 | /** | 19 | /** |
19 | * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. | 20 | * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. |
@@ -27,24 +28,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); | |||
27 | irqreturn_t | 28 | irqreturn_t |
28 | qla2100_intr_handler(int irq, void *dev_id) | 29 | qla2100_intr_handler(int irq, void *dev_id) |
29 | { | 30 | { |
30 | scsi_qla_host_t *ha; | 31 | scsi_qla_host_t *vha; |
32 | struct qla_hw_data *ha; | ||
31 | struct device_reg_2xxx __iomem *reg; | 33 | struct device_reg_2xxx __iomem *reg; |
32 | int status; | 34 | int status; |
33 | unsigned long iter; | 35 | unsigned long iter; |
34 | uint16_t hccr; | 36 | uint16_t hccr; |
35 | uint16_t mb[4]; | 37 | uint16_t mb[4]; |
38 | struct rsp_que *rsp; | ||
36 | 39 | ||
37 | ha = (scsi_qla_host_t *) dev_id; | 40 | rsp = (struct rsp_que *) dev_id; |
38 | if (!ha) { | 41 | if (!rsp) { |
39 | printk(KERN_INFO | 42 | printk(KERN_INFO |
40 | "%s(): NULL host pointer\n", __func__); | 43 | "%s(): NULL response queue pointer\n", __func__); |
41 | return (IRQ_NONE); | 44 | return (IRQ_NONE); |
42 | } | 45 | } |
43 | 46 | ||
47 | ha = rsp->hw; | ||
44 | reg = &ha->iobase->isp; | 48 | reg = &ha->iobase->isp; |
45 | status = 0; | 49 | status = 0; |
46 | 50 | ||
47 | spin_lock(&ha->hardware_lock); | 51 | spin_lock(&ha->hardware_lock); |
52 | vha = qla2x00_get_rsp_host(rsp); | ||
48 | for (iter = 50; iter--; ) { | 53 | for (iter = 50; iter--; ) { |
49 | hccr = RD_REG_WORD(®->hccr); | 54 | hccr = RD_REG_WORD(®->hccr); |
50 | if (hccr & HCCR_RISC_PAUSE) { | 55 | if (hccr & HCCR_RISC_PAUSE) { |
@@ -59,8 +64,8 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
59 | WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); | 64 | WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); |
60 | RD_REG_WORD(®->hccr); | 65 | RD_REG_WORD(®->hccr); |
61 | 66 | ||
62 | ha->isp_ops->fw_dump(ha, 1); | 67 | ha->isp_ops->fw_dump(vha, 1); |
63 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 68 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
64 | break; | 69 | break; |
65 | } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) | 70 | } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) |
66 | break; | 71 | break; |
@@ -72,24 +77,24 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
72 | /* Get mailbox data. */ | 77 | /* Get mailbox data. */ |
73 | mb[0] = RD_MAILBOX_REG(ha, reg, 0); | 78 | mb[0] = RD_MAILBOX_REG(ha, reg, 0); |
74 | if (mb[0] > 0x3fff && mb[0] < 0x8000) { | 79 | if (mb[0] > 0x3fff && mb[0] < 0x8000) { |
75 | qla2x00_mbx_completion(ha, mb[0]); | 80 | qla2x00_mbx_completion(vha, mb[0]); |
76 | status |= MBX_INTERRUPT; | 81 | status |= MBX_INTERRUPT; |
77 | } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { | 82 | } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { |
78 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); | 83 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); |
79 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); | 84 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); |
80 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); | 85 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); |
81 | qla2x00_async_event(ha, mb); | 86 | qla2x00_async_event(vha, mb); |
82 | } else { | 87 | } else { |
83 | /*EMPTY*/ | 88 | /*EMPTY*/ |
84 | DEBUG2(printk("scsi(%ld): Unrecognized " | 89 | DEBUG2(printk("scsi(%ld): Unrecognized " |
85 | "interrupt type (%d).\n", | 90 | "interrupt type (%d).\n", |
86 | ha->host_no, mb[0])); | 91 | vha->host_no, mb[0])); |
87 | } | 92 | } |
88 | /* Release mailbox registers. */ | 93 | /* Release mailbox registers. */ |
89 | WRT_REG_WORD(®->semaphore, 0); | 94 | WRT_REG_WORD(®->semaphore, 0); |
90 | RD_REG_WORD(®->semaphore); | 95 | RD_REG_WORD(®->semaphore); |
91 | } else { | 96 | } else { |
92 | qla2x00_process_response_queue(ha); | 97 | qla2x00_process_response_queue(vha); |
93 | 98 | ||
94 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); | 99 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); |
95 | RD_REG_WORD(®->hccr); | 100 | RD_REG_WORD(®->hccr); |
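
In the ISP2100/2200 path above, the mailbox-0 value read while the semaphore is held selects the service routine; the two value ranges come straight from the handler's comparisons. A throwaway sketch of that classification (the enum names are illustrative, not driver symbols):

```c
#include <stdint.h>
#include <stdio.h>

enum mb0_kind { MB0_MBX_COMPLETION, MB0_ASYNC_EVENT, MB0_UNRECOGNIZED };

static enum mb0_kind classify_mb0(uint16_t mb0)
{
	if (mb0 > 0x3fff && mb0 < 0x8000)   /* 0x4000..0x7fff */
		return MB0_MBX_COMPLETION;  /* -> qla2x00_mbx_completion() */
	if (mb0 > 0x7fff && mb0 < 0xc000)   /* 0x8000..0xbfff */
		return MB0_ASYNC_EVENT;     /* -> qla2x00_async_event()    */
	return MB0_UNRECOGNIZED;            /* logged and ignored          */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_mb0(0x4000), classify_mb0(0x8012), classify_mb0(0x0001));
	return 0;
}
```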
@@ -118,25 +123,29 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
118 | irqreturn_t | 123 | irqreturn_t |
119 | qla2300_intr_handler(int irq, void *dev_id) | 124 | qla2300_intr_handler(int irq, void *dev_id) |
120 | { | 125 | { |
121 | scsi_qla_host_t *ha; | 126 | scsi_qla_host_t *vha; |
122 | struct device_reg_2xxx __iomem *reg; | 127 | struct device_reg_2xxx __iomem *reg; |
123 | int status; | 128 | int status; |
124 | unsigned long iter; | 129 | unsigned long iter; |
125 | uint32_t stat; | 130 | uint32_t stat; |
126 | uint16_t hccr; | 131 | uint16_t hccr; |
127 | uint16_t mb[4]; | 132 | uint16_t mb[4]; |
133 | struct rsp_que *rsp; | ||
134 | struct qla_hw_data *ha; | ||
128 | 135 | ||
129 | ha = (scsi_qla_host_t *) dev_id; | 136 | rsp = (struct rsp_que *) dev_id; |
130 | if (!ha) { | 137 | if (!rsp) { |
131 | printk(KERN_INFO | 138 | printk(KERN_INFO |
132 | "%s(): NULL host pointer\n", __func__); | 139 | "%s(): NULL response queue pointer\n", __func__); |
133 | return (IRQ_NONE); | 140 | return (IRQ_NONE); |
134 | } | 141 | } |
135 | 142 | ||
143 | ha = rsp->hw; | ||
136 | reg = &ha->iobase->isp; | 144 | reg = &ha->iobase->isp; |
137 | status = 0; | 145 | status = 0; |
138 | 146 | ||
139 | spin_lock(&ha->hardware_lock); | 147 | spin_lock(&ha->hardware_lock); |
148 | vha = qla2x00_get_rsp_host(rsp); | ||
140 | for (iter = 50; iter--; ) { | 149 | for (iter = 50; iter--; ) { |
141 | stat = RD_REG_DWORD(®->u.isp2300.host_status); | 150 | stat = RD_REG_DWORD(®->u.isp2300.host_status); |
142 | if (stat & HSR_RISC_PAUSED) { | 151 | if (stat & HSR_RISC_PAUSED) { |
@@ -159,8 +168,8 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
159 | WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); | 168 | WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); |
160 | RD_REG_WORD(®->hccr); | 169 | RD_REG_WORD(®->hccr); |
161 | 170 | ||
162 | ha->isp_ops->fw_dump(ha, 1); | 171 | ha->isp_ops->fw_dump(vha, 1); |
163 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 172 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
164 | break; | 173 | break; |
165 | } else if ((stat & HSR_RISC_INT) == 0) | 174 | } else if ((stat & HSR_RISC_INT) == 0) |
166 | break; | 175 | break; |
@@ -170,7 +179,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
170 | case 0x2: | 179 | case 0x2: |
171 | case 0x10: | 180 | case 0x10: |
172 | case 0x11: | 181 | case 0x11: |
173 | qla2x00_mbx_completion(ha, MSW(stat)); | 182 | qla2x00_mbx_completion(vha, MSW(stat)); |
174 | status |= MBX_INTERRUPT; | 183 | status |= MBX_INTERRUPT; |
175 | 184 | ||
176 | /* Release mailbox registers. */ | 185 | /* Release mailbox registers. */ |
@@ -181,26 +190,26 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
181 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); | 190 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); |
182 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); | 191 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); |
183 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); | 192 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); |
184 | qla2x00_async_event(ha, mb); | 193 | qla2x00_async_event(vha, mb); |
185 | break; | 194 | break; |
186 | case 0x13: | 195 | case 0x13: |
187 | qla2x00_process_response_queue(ha); | 196 | qla2x00_process_response_queue(vha); |
188 | break; | 197 | break; |
189 | case 0x15: | 198 | case 0x15: |
190 | mb[0] = MBA_CMPLT_1_16BIT; | 199 | mb[0] = MBA_CMPLT_1_16BIT; |
191 | mb[1] = MSW(stat); | 200 | mb[1] = MSW(stat); |
192 | qla2x00_async_event(ha, mb); | 201 | qla2x00_async_event(vha, mb); |
193 | break; | 202 | break; |
194 | case 0x16: | 203 | case 0x16: |
195 | mb[0] = MBA_SCSI_COMPLETION; | 204 | mb[0] = MBA_SCSI_COMPLETION; |
196 | mb[1] = MSW(stat); | 205 | mb[1] = MSW(stat); |
197 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); | 206 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); |
198 | qla2x00_async_event(ha, mb); | 207 | qla2x00_async_event(vha, mb); |
199 | break; | 208 | break; |
200 | default: | 209 | default: |
201 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " | 210 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " |
202 | "(%d).\n", | 211 | "(%d).\n", |
203 | ha->host_no, stat & 0xff)); | 212 | vha->host_no, stat & 0xff)); |
204 | break; | 213 | break; |
205 | } | 214 | } |
206 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); | 215 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); |
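
For the ISP23xx parts the handler above decodes the low byte of host_status instead, with the upper 16 bits (MSW) carrying the fast-post payload. A small sketch covering only the cases visible in this hunk; the case values and their meanings are taken from the code above, and nothing else about the register is implied:

```c
#include <stdint.h>
#include <stdio.h>

#define MSW(x)  ((uint16_t)((x) >> 16))

static const char *decode_host_status(uint32_t stat)
{
	switch (stat & 0xff) {
	case 0x2:
	case 0x10:
	case 0x11:
		return "mailbox command completion (MSW = mb0)";
	case 0x13:
		return "response queue needs processing";
	case 0x15:
		return "fast-post: one 16-bit handle in MSW";
	case 0x16:
		return "fast-post: SCSI completion, handle in MSW";
	default:
		return "unrecognized";
	}
}

int main(void)
{
	uint32_t stat = 0x00040015;   /* example: fast-post, handle 0x0004 */

	printf("%s, MSW=%#x\n", decode_host_status(stat), (unsigned)MSW(stat));
	return 0;
}
```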
@@ -223,10 +232,11 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
223 | * @mb0: Mailbox0 register | 232 | * @mb0: Mailbox0 register |
224 | */ | 233 | */ |
225 | static void | 234 | static void |
226 | qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) | 235 | qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) |
227 | { | 236 | { |
228 | uint16_t cnt; | 237 | uint16_t cnt; |
229 | uint16_t __iomem *wptr; | 238 | uint16_t __iomem *wptr; |
239 | struct qla_hw_data *ha = vha->hw; | ||
230 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 240 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
231 | 241 | ||
232 | /* Load return mailbox registers. */ | 242 | /* Load return mailbox registers. */ |
@@ -247,10 +257,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) | |||
247 | 257 | ||
248 | if (ha->mcp) { | 258 | if (ha->mcp) { |
249 | DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", | 259 | DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", |
250 | __func__, ha->host_no, ha->mcp->mb[0])); | 260 | __func__, vha->host_no, ha->mcp->mb[0])); |
251 | } else { | 261 | } else { |
252 | DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", | 262 | DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", |
253 | __func__, ha->host_no)); | 263 | __func__, vha->host_no)); |
254 | } | 264 | } |
255 | } | 265 | } |
256 | 266 | ||
@@ -260,7 +270,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) | |||
260 | * @mb: Mailbox registers (0 - 3) | 270 | * @mb: Mailbox registers (0 - 3) |
261 | */ | 271 | */ |
262 | void | 272 | void |
263 | qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | 273 | qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb) |
264 | { | 274 | { |
265 | #define LS_UNKNOWN 2 | 275 | #define LS_UNKNOWN 2 |
266 | static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; | 276 | static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; |
@@ -268,6 +278,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
268 | uint16_t handle_cnt; | 278 | uint16_t handle_cnt; |
269 | uint16_t cnt; | 279 | uint16_t cnt; |
270 | uint32_t handles[5]; | 280 | uint32_t handles[5]; |
281 | struct qla_hw_data *ha = vha->hw; | ||
271 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 282 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
272 | uint32_t rscn_entry, host_pid; | 283 | uint32_t rscn_entry, host_pid; |
273 | uint8_t rscn_queue_index; | 284 | uint8_t rscn_queue_index; |
@@ -329,17 +340,18 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
329 | 340 | ||
330 | switch (mb[0]) { | 341 | switch (mb[0]) { |
331 | case MBA_SCSI_COMPLETION: /* Fast Post */ | 342 | case MBA_SCSI_COMPLETION: /* Fast Post */ |
332 | if (!ha->flags.online) | 343 | if (!vha->flags.online) |
333 | break; | 344 | break; |
334 | 345 | ||
335 | for (cnt = 0; cnt < handle_cnt; cnt++) | 346 | for (cnt = 0; cnt < handle_cnt; cnt++) |
336 | qla2x00_process_completed_request(ha, handles[cnt]); | 347 | qla2x00_process_completed_request(vha, handles[cnt]); |
337 | break; | 348 | break; |
338 | 349 | ||
339 | case MBA_RESET: /* Reset */ | 350 | case MBA_RESET: /* Reset */ |
340 | DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); | 351 | DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", |
352 | vha->host_no)); | ||
341 | 353 | ||
342 | set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); | 354 | set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); |
343 | break; | 355 | break; |
344 | 356 | ||
345 | case MBA_SYSTEM_ERR: /* System Error */ | 357 | case MBA_SYSTEM_ERR: /* System Error */ |
@@ -347,70 +359,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
347 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", | 359 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", |
348 | mb[1], mb[2], mb[3]); | 360 | mb[1], mb[2], mb[3]); |
349 | 361 | ||
350 | qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); | 362 | qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]); |
351 | ha->isp_ops->fw_dump(ha, 1); | 363 | ha->isp_ops->fw_dump(vha, 1); |
352 | 364 | ||
353 | if (IS_FWI2_CAPABLE(ha)) { | 365 | if (IS_FWI2_CAPABLE(ha)) { |
354 | if (mb[1] == 0 && mb[2] == 0) { | 366 | if (mb[1] == 0 && mb[2] == 0) { |
355 | qla_printk(KERN_ERR, ha, | 367 | qla_printk(KERN_ERR, ha, |
356 | "Unrecoverable Hardware Error: adapter " | 368 | "Unrecoverable Hardware Error: adapter " |
357 | "marked OFFLINE!\n"); | 369 | "marked OFFLINE!\n"); |
358 | ha->flags.online = 0; | 370 | vha->flags.online = 0; |
359 | } else | 371 | } else |
360 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 372 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
361 | } else if (mb[1] == 0) { | 373 | } else if (mb[1] == 0) { |
362 | qla_printk(KERN_INFO, ha, | 374 | qla_printk(KERN_INFO, ha, |
363 | "Unrecoverable Hardware Error: adapter marked " | 375 | "Unrecoverable Hardware Error: adapter marked " |
364 | "OFFLINE!\n"); | 376 | "OFFLINE!\n"); |
365 | ha->flags.online = 0; | 377 | vha->flags.online = 0; |
366 | } else | 378 | } else |
367 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 379 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
368 | break; | 380 | break; |
369 | 381 | ||
370 | case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ | 382 | case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ |
371 | DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", | 383 | DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", |
372 | ha->host_no)); | 384 | vha->host_no)); |
373 | qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); | 385 | qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); |
374 | 386 | ||
375 | qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); | 387 | qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]); |
376 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 388 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
377 | break; | 389 | break; |
378 | 390 | ||
379 | case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ | 391 | case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ |
380 | DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", | 392 | DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", |
381 | ha->host_no)); | 393 | vha->host_no)); |
382 | qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); | 394 | qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); |
383 | 395 | ||
384 | qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); | 396 | qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]); |
385 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 397 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
386 | break; | 398 | break; |
387 | 399 | ||
388 | case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ | 400 | case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ |
389 | DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", | 401 | DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", |
390 | ha->host_no)); | 402 | vha->host_no)); |
391 | break; | 403 | break; |
392 | 404 | ||
393 | case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ | 405 | case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ |
394 | DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, | 406 | DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, |
395 | mb[1])); | 407 | mb[1])); |
396 | qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); | 408 | qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); |
397 | 409 | ||
398 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 410 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
399 | atomic_set(&ha->loop_state, LOOP_DOWN); | 411 | atomic_set(&vha->loop_state, LOOP_DOWN); |
400 | atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); | 412 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
401 | qla2x00_mark_all_devices_lost(ha, 1); | 413 | qla2x00_mark_all_devices_lost(vha, 1); |
402 | } | 414 | } |
403 | 415 | ||
404 | if (ha->parent) { | 416 | if (vha->vp_idx) { |
405 | atomic_set(&ha->vp_state, VP_FAILED); | 417 | atomic_set(&vha->vp_state, VP_FAILED); |
406 | fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); | 418 | fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); |
407 | } | 419 | } |
408 | 420 | ||
409 | set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); | 421 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); |
410 | set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); | 422 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); |
411 | 423 | ||
412 | ha->flags.management_server_logged_in = 0; | 424 | vha->flags.management_server_logged_in = 0; |
413 | qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); | 425 | qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); |
414 | break; | 426 | break; |
415 | 427 | ||
416 | case MBA_LOOP_UP: /* Loop Up Event */ | 428 | case MBA_LOOP_UP: /* Loop Up Event */ |
@@ -425,59 +437,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
425 | } | 437 | } |
426 | 438 | ||
427 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", | 439 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", |
428 | ha->host_no, link_speed)); | 440 | vha->host_no, link_speed)); |
429 | qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", | 441 | qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", |
430 | link_speed); | 442 | link_speed); |
431 | 443 | ||
432 | ha->flags.management_server_logged_in = 0; | 444 | vha->flags.management_server_logged_in = 0; |
433 | qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate); | 445 | qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); |
434 | break; | 446 | break; |
435 | 447 | ||
436 | case MBA_LOOP_DOWN: /* Loop Down Event */ | 448 | case MBA_LOOP_DOWN: /* Loop Down Event */ |
437 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " | 449 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " |
438 | "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3])); | 450 | "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3])); |
439 | qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", | 451 | qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", |
440 | mb[1], mb[2], mb[3]); | 452 | mb[1], mb[2], mb[3]); |
441 | 453 | ||
442 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 454 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
443 | atomic_set(&ha->loop_state, LOOP_DOWN); | 455 | atomic_set(&vha->loop_state, LOOP_DOWN); |
444 | atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); | 456 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
445 | ha->device_flags |= DFLG_NO_CABLE; | 457 | vha->device_flags |= DFLG_NO_CABLE; |
446 | qla2x00_mark_all_devices_lost(ha, 1); | 458 | qla2x00_mark_all_devices_lost(vha, 1); |
447 | } | 459 | } |
448 | 460 | ||
449 | if (ha->parent) { | 461 | if (vha->vp_idx) { |
450 | atomic_set(&ha->vp_state, VP_FAILED); | 462 | atomic_set(&vha->vp_state, VP_FAILED); |
451 | fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); | 463 | fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); |
452 | } | 464 | } |
453 | 465 | ||
454 | ha->flags.management_server_logged_in = 0; | 466 | vha->flags.management_server_logged_in = 0; |
455 | ha->link_data_rate = PORT_SPEED_UNKNOWN; | 467 | ha->link_data_rate = PORT_SPEED_UNKNOWN; |
456 | qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); | 468 | qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); |
457 | break; | 469 | break; |
458 | 470 | ||
459 | case MBA_LIP_RESET: /* LIP reset occurred */ | 471 | case MBA_LIP_RESET: /* LIP reset occurred */ |
460 | DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", | 472 | DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", |
461 | ha->host_no, mb[1])); | 473 | vha->host_no, mb[1])); |
462 | qla_printk(KERN_INFO, ha, | 474 | qla_printk(KERN_INFO, ha, |
463 | "LIP reset occurred (%x).\n", mb[1]); | 475 | "LIP reset occurred (%x).\n", mb[1]); |
464 | 476 | ||
465 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 477 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
466 | atomic_set(&ha->loop_state, LOOP_DOWN); | 478 | atomic_set(&vha->loop_state, LOOP_DOWN); |
467 | atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); | 479 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
468 | qla2x00_mark_all_devices_lost(ha, 1); | 480 | qla2x00_mark_all_devices_lost(vha, 1); |
469 | } | 481 | } |
470 | 482 | ||
471 | if (ha->parent) { | 483 | if (vha->vp_idx) { |
472 | atomic_set(&ha->vp_state, VP_FAILED); | 484 | atomic_set(&vha->vp_state, VP_FAILED); |
473 | fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); | 485 | fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); |
474 | } | 486 | } |
475 | 487 | ||
476 | set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); | 488 | set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); |
477 | 489 | ||
478 | ha->operating_mode = LOOP; | 490 | ha->operating_mode = LOOP; |
479 | ha->flags.management_server_logged_in = 0; | 491 | vha->flags.management_server_logged_in = 0; |
480 | qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]); | 492 | qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); |
481 | break; | 493 | break; |
482 | 494 | ||
483 | case MBA_POINT_TO_POINT: /* Point-to-Point */ | 495 | case MBA_POINT_TO_POINT: /* Point-to-Point */ |
@@ -485,33 +497,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
485 | break; | 497 | break; |
486 | 498 | ||
487 | DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", | 499 | DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", |
488 | ha->host_no)); | 500 | vha->host_no)); |
489 | 501 | ||
490 | /* | 502 | /* |
491 | * Until there's a transition from loop down to loop up, treat | 503 | * Until there's a transition from loop down to loop up, treat |
492 | * this as loop down only. | 504 | * this as loop down only. |
493 | */ | 505 | */ |
494 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 506 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
495 | atomic_set(&ha->loop_state, LOOP_DOWN); | 507 | atomic_set(&vha->loop_state, LOOP_DOWN); |
496 | if (!atomic_read(&ha->loop_down_timer)) | 508 | if (!atomic_read(&vha->loop_down_timer)) |
497 | atomic_set(&ha->loop_down_timer, | 509 | atomic_set(&vha->loop_down_timer, |
498 | LOOP_DOWN_TIME); | 510 | LOOP_DOWN_TIME); |
499 | qla2x00_mark_all_devices_lost(ha, 1); | 511 | qla2x00_mark_all_devices_lost(vha, 1); |
500 | } | 512 | } |
501 | 513 | ||
502 | if (ha->parent) { | 514 | if (vha->vp_idx) { |
503 | atomic_set(&ha->vp_state, VP_FAILED); | 515 | atomic_set(&vha->vp_state, VP_FAILED); |
504 | fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); | 516 | fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); |
505 | } | 517 | } |
506 | 518 | ||
507 | if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { | 519 | if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) |
508 | set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); | 520 | set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); |
509 | } | 521 | |
510 | set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); | 522 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); |
511 | set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); | 523 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); |
512 | 524 | ||
513 | ha->flags.gpsc_supported = 1; | 525 | ha->flags.gpsc_supported = 1; |
514 | ha->flags.management_server_logged_in = 0; | 526 | vha->flags.management_server_logged_in = 0; |
515 | break; | 527 | break; |
516 | 528 | ||
517 | case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ | 529 | case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ |
@@ -520,25 +532,25 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
520 | 532 | ||
521 | DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " | 533 | DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " |
522 | "received.\n", | 534 | "received.\n", |
523 | ha->host_no)); | 535 | vha->host_no)); |
524 | qla_printk(KERN_INFO, ha, | 536 | qla_printk(KERN_INFO, ha, |
525 | "Configuration change detected: value=%x.\n", mb[1]); | 537 | "Configuration change detected: value=%x.\n", mb[1]); |
526 | 538 | ||
527 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 539 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
528 | atomic_set(&ha->loop_state, LOOP_DOWN); | 540 | atomic_set(&vha->loop_state, LOOP_DOWN); |
529 | if (!atomic_read(&ha->loop_down_timer)) | 541 | if (!atomic_read(&vha->loop_down_timer)) |
530 | atomic_set(&ha->loop_down_timer, | 542 | atomic_set(&vha->loop_down_timer, |
531 | LOOP_DOWN_TIME); | 543 | LOOP_DOWN_TIME); |
532 | qla2x00_mark_all_devices_lost(ha, 1); | 544 | qla2x00_mark_all_devices_lost(vha, 1); |
533 | } | 545 | } |
534 | 546 | ||
535 | if (ha->parent) { | 547 | if (vha->vp_idx) { |
536 | atomic_set(&ha->vp_state, VP_FAILED); | 548 | atomic_set(&vha->vp_state, VP_FAILED); |
537 | fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); | 549 | fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); |
538 | } | 550 | } |
539 | 551 | ||
540 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | 552 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
541 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); | 553 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); |
542 | break; | 554 | break; |
543 | 555 | ||
544 | case MBA_PORT_UPDATE: /* Port database update */ | 556 | case MBA_PORT_UPDATE: /* Port database update */ |
@@ -547,107 +559,106 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
547 | * event etc. earlier indicating loop is down) then process | 559 | * event etc. earlier indicating loop is down) then process |
548 | * it. Otherwise ignore it and Wait for RSCN to come in. | 560 | * it. Otherwise ignore it and Wait for RSCN to come in. |
549 | */ | 561 | */ |
550 | atomic_set(&ha->loop_down_timer, 0); | 562 | atomic_set(&vha->loop_down_timer, 0); |
551 | if (atomic_read(&ha->loop_state) != LOOP_DOWN && | 563 | if (atomic_read(&vha->loop_state) != LOOP_DOWN && |
552 | atomic_read(&ha->loop_state) != LOOP_DEAD) { | 564 | atomic_read(&vha->loop_state) != LOOP_DEAD) { |
553 | DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " | 565 | DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " |
554 | "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1], | 566 | "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], |
555 | mb[2], mb[3])); | 567 | mb[2], mb[3])); |
556 | break; | 568 | break; |
557 | } | 569 | } |
558 | 570 | ||
559 | DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", | 571 | DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", |
560 | ha->host_no)); | 572 | vha->host_no)); |
561 | DEBUG(printk(KERN_INFO | 573 | DEBUG(printk(KERN_INFO |
562 | "scsi(%ld): Port database changed %04x %04x %04x.\n", | 574 | "scsi(%ld): Port database changed %04x %04x %04x.\n", |
563 | ha->host_no, mb[1], mb[2], mb[3])); | 575 | vha->host_no, mb[1], mb[2], mb[3])); |
564 | 576 | ||
565 | /* | 577 | /* |
566 | * Mark all devices as missing so we will login again. | 578 | * Mark all devices as missing so we will login again. |
567 | */ | 579 | */ |
568 | atomic_set(&ha->loop_state, LOOP_UP); | 580 | atomic_set(&vha->loop_state, LOOP_UP); |
569 | 581 | ||
570 | qla2x00_mark_all_devices_lost(ha, 1); | 582 | qla2x00_mark_all_devices_lost(vha, 1); |
571 | 583 | ||
572 | ha->flags.rscn_queue_overflow = 1; | 584 | vha->flags.rscn_queue_overflow = 1; |
573 | 585 | ||
574 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | 586 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
575 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); | 587 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); |
576 | break; | 588 | break; |
577 | 589 | ||
578 | case MBA_RSCN_UPDATE: /* State Change Registration */ | 590 | case MBA_RSCN_UPDATE: /* State Change Registration */ |
579 | /* Check if the Vport has issued a SCR */ | 591 | /* Check if the Vport has issued a SCR */ |
580 | if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) | 592 | if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) |
581 | break; | 593 | break; |
582 | /* Only handle SCNs for our Vport index. */ | 594 | /* Only handle SCNs for our Vport index. */ |
583 | if (ha->parent && ha->vp_idx != (mb[3] & 0xff)) | 595 | if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) |
584 | break; | 596 | break; |
585 | |||
586 | DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", | 597 | DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", |
587 | ha->host_no)); | 598 | vha->host_no)); |
588 | DEBUG(printk(KERN_INFO | 599 | DEBUG(printk(KERN_INFO |
589 | "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", | 600 | "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", |
590 | ha->host_no, mb[1], mb[2], mb[3])); | 601 | vha->host_no, mb[1], mb[2], mb[3])); |
591 | 602 | ||
592 | rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; | 603 | rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; |
593 | host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | | 604 | host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) |
594 | ha->d_id.b.al_pa; | 605 | | vha->d_id.b.al_pa; |
595 | if (rscn_entry == host_pid) { | 606 | if (rscn_entry == host_pid) { |
596 | DEBUG(printk(KERN_INFO | 607 | DEBUG(printk(KERN_INFO |
597 | "scsi(%ld): Ignoring RSCN update to local host " | 608 | "scsi(%ld): Ignoring RSCN update to local host " |
598 | "port ID (%06x)\n", | 609 | "port ID (%06x)\n", |
599 | ha->host_no, host_pid)); | 610 | vha->host_no, host_pid)); |
600 | break; | 611 | break; |
601 | } | 612 | } |
602 | 613 | ||
603 | /* Ignore reserved bits from RSCN-payload. */ | 614 | /* Ignore reserved bits from RSCN-payload. */ |
604 | rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; | 615 | rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; |
605 | rscn_queue_index = ha->rscn_in_ptr + 1; | 616 | rscn_queue_index = vha->rscn_in_ptr + 1; |
606 | if (rscn_queue_index == MAX_RSCN_COUNT) | 617 | if (rscn_queue_index == MAX_RSCN_COUNT) |
607 | rscn_queue_index = 0; | 618 | rscn_queue_index = 0; |
608 | if (rscn_queue_index != ha->rscn_out_ptr) { | 619 | if (rscn_queue_index != vha->rscn_out_ptr) { |
609 | ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; | 620 | vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; |
610 | ha->rscn_in_ptr = rscn_queue_index; | 621 | vha->rscn_in_ptr = rscn_queue_index; |
611 | } else { | 622 | } else { |
612 | ha->flags.rscn_queue_overflow = 1; | 623 | vha->flags.rscn_queue_overflow = 1; |
613 | } | 624 | } |
614 | 625 | ||
615 | atomic_set(&ha->loop_state, LOOP_UPDATE); | 626 | atomic_set(&vha->loop_state, LOOP_UPDATE); |
616 | atomic_set(&ha->loop_down_timer, 0); | 627 | atomic_set(&vha->loop_down_timer, 0); |
617 | ha->flags.management_server_logged_in = 0; | 628 | vha->flags.management_server_logged_in = 0; |
618 | 629 | ||
619 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | 630 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
620 | set_bit(RSCN_UPDATE, &ha->dpc_flags); | 631 | set_bit(RSCN_UPDATE, &vha->dpc_flags); |
621 | qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry); | 632 | qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); |
622 | break; | 633 | break; |
623 | 634 | ||
624 | /* case MBA_RIO_RESPONSE: */ | 635 | /* case MBA_RIO_RESPONSE: */ |
625 | case MBA_ZIO_RESPONSE: | 636 | case MBA_ZIO_RESPONSE: |
626 | DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", | 637 | DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", |
627 | ha->host_no)); | 638 | vha->host_no)); |
628 | DEBUG(printk(KERN_INFO | 639 | DEBUG(printk(KERN_INFO |
629 | "scsi(%ld): [R|Z]IO update completion.\n", | 640 | "scsi(%ld): [R|Z]IO update completion.\n", |
630 | ha->host_no)); | 641 | vha->host_no)); |
631 | 642 | ||
632 | if (IS_FWI2_CAPABLE(ha)) | 643 | if (IS_FWI2_CAPABLE(ha)) |
633 | qla24xx_process_response_queue(ha); | 644 | qla24xx_process_response_queue(vha); |
634 | else | 645 | else |
635 | qla2x00_process_response_queue(ha); | 646 | qla2x00_process_response_queue(vha); |
636 | break; | 647 | break; |
637 | 648 | ||
638 | case MBA_DISCARD_RND_FRAME: | 649 | case MBA_DISCARD_RND_FRAME: |
639 | DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " | 650 | DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " |
640 | "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); | 651 | "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); |
641 | break; | 652 | break; |
642 | 653 | ||
643 | case MBA_TRACE_NOTIFICATION: | 654 | case MBA_TRACE_NOTIFICATION: |
644 | DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", | 655 | DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", |
645 | ha->host_no, mb[1], mb[2])); | 656 | vha->host_no, mb[1], mb[2])); |
646 | break; | 657 | break; |
647 | 658 | ||
648 | case MBA_ISP84XX_ALERT: | 659 | case MBA_ISP84XX_ALERT: |
649 | DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " | 660 | DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " |
650 | "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3])); | 661 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); |
651 | 662 | ||
652 | spin_lock_irqsave(&ha->cs84xx->access_lock, flags); | 663 | spin_lock_irqsave(&ha->cs84xx->access_lock, flags); |
653 | switch (mb[1]) { | 664 | switch (mb[1]) { |
@@ -682,7 +693,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
682 | break; | 693 | break; |
683 | } | 694 | } |
684 | 695 | ||
685 | if (!ha->parent && ha->num_vhosts) | 696 | if (!vha->vp_idx && ha->num_vhosts) |
686 | qla2x00_alert_all_vps(ha, mb); | 697 | qla2x00_alert_all_vps(ha, mb); |
687 | } | 698 | } |
688 | 699 | ||
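
A recurring substitution in this file is the vport test: checks on ha->parent become checks on vha->vp_idx, i.e. the base port carries index 0 and any non-zero index denotes a virtual port — that is the reading the substituted boolean tests support. Minimal illustration (the struct here is a trivial stand-in):

```c
#include <stdbool.h>
#include <stdio.h>

struct scsi_qla_host { unsigned int vp_idx; };

static bool is_vport(const struct scsi_qla_host *vha)
{
	return vha->vp_idx != 0;    /* was: vha->parent != NULL */
}

int main(void)
{
	struct scsi_qla_host base = { .vp_idx = 0 }, vport = { .vp_idx = 2 };

	printf("base:%d vport:%d\n", is_vport(&base), is_vport(&vport));
	return 0;
}
```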
@@ -690,8 +701,8 @@ static void | |||
690 | qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) | 701 | qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) |
691 | { | 702 | { |
692 | fc_port_t *fcport = data; | 703 | fc_port_t *fcport = data; |
693 | 704 | struct qla_hw_data *ha = fcport->vha->hw; | |
694 | if (fcport->ha->max_q_depth <= sdev->queue_depth) | 705 | if (ha->req->max_q_depth <= sdev->queue_depth) |
695 | return; | 706 | return; |
696 | 707 | ||
697 | if (sdev->ordered_tags) | 708 | if (sdev->ordered_tags) |
@@ -703,9 +714,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) | |||
703 | 714 | ||
704 | fcport->last_ramp_up = jiffies; | 715 | fcport->last_ramp_up = jiffies; |
705 | 716 | ||
706 | DEBUG2(qla_printk(KERN_INFO, fcport->ha, | 717 | DEBUG2(qla_printk(KERN_INFO, ha, |
707 | "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", | 718 | "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", |
708 | fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, | 719 | fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, |
709 | sdev->queue_depth)); | 720 | sdev->queue_depth)); |
710 | } | 721 | } |
711 | 722 | ||
@@ -717,20 +728,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data) | |||
717 | if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1)) | 728 | if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1)) |
718 | return; | 729 | return; |
719 | 730 | ||
720 | DEBUG2(qla_printk(KERN_INFO, fcport->ha, | 731 | DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, |
721 | "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", | 732 | "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", |
722 | fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, | 733 | fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, |
723 | sdev->queue_depth)); | 734 | sdev->queue_depth)); |
724 | } | 735 | } |
725 | 736 | ||
726 | static inline void | 737 | static inline void |
727 | qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) | 738 | qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, srb_t *sp) |
728 | { | 739 | { |
729 | fc_port_t *fcport; | 740 | fc_port_t *fcport; |
730 | struct scsi_device *sdev; | 741 | struct scsi_device *sdev; |
742 | struct qla_hw_data *ha = vha->hw; | ||
731 | 743 | ||
732 | sdev = sp->cmd->device; | 744 | sdev = sp->cmd->device; |
733 | if (sdev->queue_depth >= ha->max_q_depth) | 745 | if (sdev->queue_depth >= ha->req->max_q_depth) |
734 | return; | 746 | return; |
735 | 747 | ||
736 | fcport = sp->fcport; | 748 | fcport = sp->fcport; |
@@ -751,25 +763,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) | |||
751 | * @index: SRB index | 763 | * @index: SRB index |
752 | */ | 764 | */ |
753 | static void | 765 | static void |
754 | qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) | 766 | qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index) |
755 | { | 767 | { |
756 | srb_t *sp; | 768 | srb_t *sp; |
769 | struct qla_hw_data *ha = vha->hw; | ||
770 | struct req_que *req = ha->req; | ||
757 | 771 | ||
758 | /* Validate handle. */ | 772 | /* Validate handle. */ |
759 | if (index >= MAX_OUTSTANDING_COMMANDS) { | 773 | if (index >= MAX_OUTSTANDING_COMMANDS) { |
760 | DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", | 774 | DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", |
761 | ha->host_no, index)); | 775 | vha->host_no, index)); |
762 | qla_printk(KERN_WARNING, ha, | 776 | qla_printk(KERN_WARNING, ha, |
763 | "Invalid SCSI completion handle %d.\n", index); | 777 | "Invalid SCSI completion handle %d.\n", index); |
764 | 778 | ||
765 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 779 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
766 | return; | 780 | return; |
767 | } | 781 | } |
768 | 782 | ||
769 | sp = ha->outstanding_cmds[index]; | 783 | sp = req->outstanding_cmds[index]; |
770 | if (sp) { | 784 | if (sp) { |
771 | /* Free outstanding command slot. */ | 785 | /* Free outstanding command slot. */ |
772 | ha->outstanding_cmds[index] = NULL; | 786 | req->outstanding_cmds[index] = NULL; |
773 | 787 | ||
774 | CMD_COMPL_STATUS(sp->cmd) = 0L; | 788 | CMD_COMPL_STATUS(sp->cmd) = 0L; |
775 | CMD_SCSI_STATUS(sp->cmd) = 0L; | 789 | CMD_SCSI_STATUS(sp->cmd) = 0L; |
@@ -777,15 +791,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) | |||
777 | /* Save ISP completion status */ | 791 | /* Save ISP completion status */ |
778 | sp->cmd->result = DID_OK << 16; | 792 | sp->cmd->result = DID_OK << 16; |
779 | 793 | ||
780 | qla2x00_ramp_up_queue_depth(ha, sp); | 794 | qla2x00_ramp_up_queue_depth(vha, sp); |
781 | qla2x00_sp_compl(ha, sp); | 795 | qla2x00_sp_compl(vha, sp); |
782 | } else { | 796 | } else { |
783 | DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", | 797 | DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", |
784 | ha->host_no)); | 798 | vha->host_no)); |
785 | qla_printk(KERN_WARNING, ha, | 799 | qla_printk(KERN_WARNING, ha, |
786 | "Invalid ISP SCSI completion handle\n"); | 800 | "Invalid ISP SCSI completion handle\n"); |
787 | 801 | ||
788 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 802 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
789 | } | 803 | } |
790 | } | 804 | } |
791 | 805 | ||
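
This hunk shows change (1) from the commit message in action: the outstanding-command table now lives on the request queue, so completion looks the handle up via ha->req instead of on scsi_qla_host. A simplified, self-contained sketch of that lookup-and-free step; the types and the array size are stand-ins, not the driver's definitions:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024   /* illustrative size */

struct srb { int done; };               /* stand-in for srb_t */

struct req_que {
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

/* Returns the srb for a completed handle, or NULL for a bad/stale handle. */
static struct srb *complete_handle(struct req_que *req, uint32_t index)
{
	struct srb *sp;

	if (index >= MAX_OUTSTANDING_COMMANDS)   /* invalid handle */
		return NULL;
	sp = req->outstanding_cmds[index];
	req->outstanding_cmds[index] = NULL;     /* free the slot  */
	return sp;
}

int main(void)
{
	static struct req_que req;
	struct srb cmd;

	req.outstanding_cmds[7] = &cmd;
	printf("%p %p\n", (void *)complete_handle(&req, 7),
	       (void *)complete_handle(&req, 7));   /* second call: NULL */
	return 0;
}
```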
@@ -794,32 +808,34 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) | |||
794 | * @ha: SCSI driver HA context | 808 | * @ha: SCSI driver HA context |
795 | */ | 809 | */ |
796 | void | 810 | void |
797 | qla2x00_process_response_queue(struct scsi_qla_host *ha) | 811 | qla2x00_process_response_queue(struct scsi_qla_host *vha) |
798 | { | 812 | { |
813 | struct qla_hw_data *ha = vha->hw; | ||
799 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 814 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
800 | sts_entry_t *pkt; | 815 | sts_entry_t *pkt; |
801 | uint16_t handle_cnt; | 816 | uint16_t handle_cnt; |
802 | uint16_t cnt; | 817 | uint16_t cnt; |
818 | struct rsp_que *rsp = ha->rsp; | ||
803 | 819 | ||
804 | if (!ha->flags.online) | 820 | if (!vha->flags.online) |
805 | return; | 821 | return; |
806 | 822 | ||
807 | while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { | 823 | while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { |
808 | pkt = (sts_entry_t *)ha->response_ring_ptr; | 824 | pkt = (sts_entry_t *)rsp->ring_ptr; |
809 | 825 | ||
810 | ha->rsp_ring_index++; | 826 | rsp->ring_index++; |
811 | if (ha->rsp_ring_index == ha->response_q_length) { | 827 | if (rsp->ring_index == rsp->length) { |
812 | ha->rsp_ring_index = 0; | 828 | rsp->ring_index = 0; |
813 | ha->response_ring_ptr = ha->response_ring; | 829 | rsp->ring_ptr = rsp->ring; |
814 | } else { | 830 | } else { |
815 | ha->response_ring_ptr++; | 831 | rsp->ring_ptr++; |
816 | } | 832 | } |
817 | 833 | ||
818 | if (pkt->entry_status != 0) { | 834 | if (pkt->entry_status != 0) { |
819 | DEBUG3(printk(KERN_INFO | 835 | DEBUG3(printk(KERN_INFO |
820 | "scsi(%ld): Process error entry.\n", ha->host_no)); | 836 | "scsi(%ld): Process error entry.\n", vha->host_no)); |
821 | 837 | ||
822 | qla2x00_error_entry(ha, pkt); | 838 | qla2x00_error_entry(vha, pkt); |
823 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 839 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
824 | wmb(); | 840 | wmb(); |
825 | continue; | 841 | continue; |
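
The response-ring walk above now lives on struct rsp_que: advance ring_ptr, wrap at rsp->length, and stamp each consumed entry with RESPONSE_PROCESSED so the next pass stops at the first unconsumed slot; the caller then writes rsp->ring_index to the response-queue out register. A simplified model of that traversal — field names follow the diff, but the entry layout and the signature value are stand-ins:

```c
#include <stdint.h>
#include <stdio.h>

#define RESPONSE_PROCESSED 0xDEADDEADu   /* illustrative sentinel */

struct response_entry { uint32_t signature; };

struct rsp_que {
	struct response_entry *ring;
	struct response_entry *ring_ptr;
	uint16_t ring_index;
	uint16_t length;
};

static unsigned int drain_response_ring(struct rsp_que *rsp)
{
	unsigned int handled = 0;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		struct response_entry *pkt = rsp->ring_ptr;

		/* Advance with wrap-around before handling the packet. */
		if (++rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		/* ...dispatch pkt by entry_type here... */
		pkt->signature = RESPONSE_PROCESSED;  /* consume the slot */
		handled++;
	}
	return handled;   /* caller then updates the out-pointer register */
}

int main(void)
{
	struct response_entry ring[4] = {
		{ 1 }, { 2 }, { RESPONSE_PROCESSED }, { RESPONSE_PROCESSED },
	};
	struct rsp_que rsp = { ring, ring, 0, 4 };

	printf("handled %u entries, ring_index=%u\n",
	       drain_response_ring(&rsp), rsp.ring_index);
	return 0;
}
```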
@@ -827,31 +843,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha) | |||
827 | 843 | ||
828 | switch (pkt->entry_type) { | 844 | switch (pkt->entry_type) { |
829 | case STATUS_TYPE: | 845 | case STATUS_TYPE: |
830 | qla2x00_status_entry(ha, pkt); | 846 | qla2x00_status_entry(vha, pkt); |
831 | break; | 847 | break; |
832 | case STATUS_TYPE_21: | 848 | case STATUS_TYPE_21: |
833 | handle_cnt = ((sts21_entry_t *)pkt)->handle_count; | 849 | handle_cnt = ((sts21_entry_t *)pkt)->handle_count; |
834 | for (cnt = 0; cnt < handle_cnt; cnt++) { | 850 | for (cnt = 0; cnt < handle_cnt; cnt++) { |
835 | qla2x00_process_completed_request(ha, | 851 | qla2x00_process_completed_request(vha, |
836 | ((sts21_entry_t *)pkt)->handle[cnt]); | 852 | ((sts21_entry_t *)pkt)->handle[cnt]); |
837 | } | 853 | } |
838 | break; | 854 | break; |
839 | case STATUS_TYPE_22: | 855 | case STATUS_TYPE_22: |
840 | handle_cnt = ((sts22_entry_t *)pkt)->handle_count; | 856 | handle_cnt = ((sts22_entry_t *)pkt)->handle_count; |
841 | for (cnt = 0; cnt < handle_cnt; cnt++) { | 857 | for (cnt = 0; cnt < handle_cnt; cnt++) { |
842 | qla2x00_process_completed_request(ha, | 858 | qla2x00_process_completed_request(vha, |
843 | ((sts22_entry_t *)pkt)->handle[cnt]); | 859 | ((sts22_entry_t *)pkt)->handle[cnt]); |
844 | } | 860 | } |
845 | break; | 861 | break; |
846 | case STATUS_CONT_TYPE: | 862 | case STATUS_CONT_TYPE: |
847 | qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); | 863 | qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); |
848 | break; | 864 | break; |
849 | default: | 865 | default: |
850 | /* Type Not Supported. */ | 866 | /* Type Not Supported. */ |
851 | DEBUG4(printk(KERN_WARNING | 867 | DEBUG4(printk(KERN_WARNING |
852 | "scsi(%ld): Received unknown response pkt type %x " | 868 | "scsi(%ld): Received unknown response pkt type %x " |
853 | "entry status=%x.\n", | 869 | "entry status=%x.\n", |
854 | ha->host_no, pkt->entry_type, pkt->entry_status)); | 870 | vha->host_no, pkt->entry_type, pkt->entry_status)); |
855 | break; | 871 | break; |
856 | } | 872 | } |
857 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 873 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
@@ -859,7 +875,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha) | |||
859 | } | 875 | } |
860 | 876 | ||
861 | /* Adjust ring index */ | 877 | /* Adjust ring index */ |
862 | WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); | 878 | WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); |
863 | } | 879 | } |
864 | 880 | ||
865 | static inline void | 881 | static inline void |
@@ -881,10 +897,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) | |||
881 | sp->request_sense_ptr += sense_len; | 897 | sp->request_sense_ptr += sense_len; |
882 | sp->request_sense_length -= sense_len; | 898 | sp->request_sense_length -= sense_len; |
883 | if (sp->request_sense_length != 0) | 899 | if (sp->request_sense_length != 0) |
884 | sp->fcport->ha->status_srb = sp; | 900 | sp->fcport->vha->status_srb = sp; |
885 | 901 | ||
886 | DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " | 902 | DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " |
887 | "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, | 903 | "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, |
888 | cp->device->channel, cp->device->id, cp->device->lun, cp, | 904 | cp->device->channel, cp->device->id, cp->device->lun, cp, |
889 | cp->serial_number)); | 905 | cp->serial_number)); |
890 | if (sense_len) | 906 | if (sense_len) |
@@ -898,7 +914,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) | |||
898 | * @pkt: Entry pointer | 914 | * @pkt: Entry pointer |
899 | */ | 915 | */ |
900 | static void | 916 | static void |
901 | qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | 917 | qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt) |
902 | { | 918 | { |
903 | srb_t *sp; | 919 | srb_t *sp; |
904 | fc_port_t *fcport; | 920 | fc_port_t *fcport; |
@@ -911,6 +927,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
911 | int32_t resid; | 927 | int32_t resid; |
912 | uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; | 928 | uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; |
913 | uint8_t *rsp_info, *sense_data; | 929 | uint8_t *rsp_info, *sense_data; |
930 | struct qla_hw_data *ha = vha->hw; | ||
931 | struct req_que *req = ha->req; | ||
914 | 932 | ||
915 | sts = (sts_entry_t *) pkt; | 933 | sts = (sts_entry_t *) pkt; |
916 | sts24 = (struct sts_entry_24xx *) pkt; | 934 | sts24 = (struct sts_entry_24xx *) pkt; |
@@ -924,31 +942,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
924 | 942 | ||
925 | /* Fast path completion. */ | 943 | /* Fast path completion. */ |
926 | if (comp_status == CS_COMPLETE && scsi_status == 0) { | 944 | if (comp_status == CS_COMPLETE && scsi_status == 0) { |
927 | qla2x00_process_completed_request(ha, sts->handle); | 945 | qla2x00_process_completed_request(vha, sts->handle); |
928 | 946 | ||
929 | return; | 947 | return; |
930 | } | 948 | } |
931 | 949 | ||
932 | /* Validate handle. */ | 950 | /* Validate handle. */ |
933 | if (sts->handle < MAX_OUTSTANDING_COMMANDS) { | 951 | if (sts->handle < MAX_OUTSTANDING_COMMANDS) { |
934 | sp = ha->outstanding_cmds[sts->handle]; | 952 | sp = req->outstanding_cmds[sts->handle]; |
935 | ha->outstanding_cmds[sts->handle] = NULL; | 953 | req->outstanding_cmds[sts->handle] = NULL; |
936 | } else | 954 | } else |
937 | sp = NULL; | 955 | sp = NULL; |
938 | 956 | ||
939 | if (sp == NULL) { | 957 | if (sp == NULL) { |
940 | DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", | 958 | DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", |
941 | ha->host_no)); | 959 | vha->host_no)); |
942 | qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); | 960 | qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); |
943 | 961 | ||
944 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 962 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
945 | qla2xxx_wake_dpc(ha); | 963 | qla2xxx_wake_dpc(vha); |
946 | return; | 964 | return; |
947 | } | 965 | } |
948 | cp = sp->cmd; | 966 | cp = sp->cmd; |
949 | if (cp == NULL) { | 967 | if (cp == NULL) { |
950 | DEBUG2(printk("scsi(%ld): Command already returned back to OS " | 968 | DEBUG2(printk("scsi(%ld): Command already returned back to OS " |
951 | "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp)); | 969 | "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); |
952 | qla_printk(KERN_WARNING, ha, | 970 | qla_printk(KERN_WARNING, ha, |
953 | "Command is NULL: already returned to OS (sp=%p)\n", sp); | 971 | "Command is NULL: already returned to OS (sp=%p)\n", sp); |
954 | 972 | ||
@@ -987,14 +1005,14 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
987 | if (rsp_info_len > 3 && rsp_info[3]) { | 1005 | if (rsp_info_len > 3 && rsp_info[3]) { |
988 | DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " | 1006 | DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " |
989 | "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." | 1007 | "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." |
990 | "retrying command\n", ha->host_no, | 1008 | "retrying command\n", vha->host_no, |
991 | cp->device->channel, cp->device->id, | 1009 | cp->device->channel, cp->device->id, |
992 | cp->device->lun, rsp_info_len, rsp_info[0], | 1010 | cp->device->lun, rsp_info_len, rsp_info[0], |
993 | rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], | 1011 | rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], |
994 | rsp_info[5], rsp_info[6], rsp_info[7])); | 1012 | rsp_info[5], rsp_info[6], rsp_info[7])); |
995 | 1013 | ||
996 | cp->result = DID_BUS_BUSY << 16; | 1014 | cp->result = DID_BUS_BUSY << 16; |
997 | qla2x00_sp_compl(ha, sp); | 1015 | qla2x00_sp_compl(vha, sp); |
998 | return; | 1016 | return; |
999 | } | 1017 | } |
1000 | } | 1018 | } |
@@ -1025,7 +1043,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1025 | qla_printk(KERN_INFO, ha, | 1043 | qla_printk(KERN_INFO, ha, |
1026 | "scsi(%ld:%d:%d:%d): Mid-layer underflow " | 1044 | "scsi(%ld:%d:%d:%d): Mid-layer underflow " |
1027 | "detected (%x of %x bytes)...returning " | 1045 | "detected (%x of %x bytes)...returning " |
1028 | "error status.\n", ha->host_no, | 1046 | "error status.\n", vha->host_no, |
1029 | cp->device->channel, cp->device->id, | 1047 | cp->device->channel, cp->device->id, |
1030 | cp->device->lun, resid, | 1048 | cp->device->lun, resid, |
1031 | scsi_bufflen(cp)); | 1049 | scsi_bufflen(cp)); |
@@ -1039,7 +1057,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1039 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { | 1057 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { |
1040 | DEBUG2(printk(KERN_INFO | 1058 | DEBUG2(printk(KERN_INFO |
1041 | "scsi(%ld): QUEUE FULL status detected " | 1059 | "scsi(%ld): QUEUE FULL status detected " |
1042 | "0x%x-0x%x.\n", ha->host_no, comp_status, | 1060 | "0x%x-0x%x.\n", vha->host_no, comp_status, |
1043 | scsi_status)); | 1061 | scsi_status)); |
1044 | 1062 | ||
1045 | /* Adjust queue depth for all luns on the port. */ | 1063 | /* Adjust queue depth for all luns on the port. */ |
@@ -1078,7 +1096,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1078 | DEBUG2(printk(KERN_INFO | 1096 | DEBUG2(printk(KERN_INFO |
1079 | "scsi(%ld:%d:%d) UNDERRUN status detected " | 1097 | "scsi(%ld:%d:%d) UNDERRUN status detected " |
1080 | "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " | 1098 | "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " |
1081 | "os_underflow=0x%x\n", ha->host_no, | 1099 | "os_underflow=0x%x\n", vha->host_no, |
1082 | cp->device->id, cp->device->lun, comp_status, | 1100 | cp->device->id, cp->device->lun, comp_status, |
1083 | scsi_status, resid_len, resid, cp->cmnd[0], | 1101 | scsi_status, resid_len, resid, cp->cmnd[0], |
1084 | cp->underflow)); | 1102 | cp->underflow)); |
@@ -1095,7 +1113,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1095 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { | 1113 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { |
1096 | DEBUG2(printk(KERN_INFO | 1114 | DEBUG2(printk(KERN_INFO |
1097 | "scsi(%ld): QUEUE FULL status detected " | 1115 | "scsi(%ld): QUEUE FULL status detected " |
1098 | "0x%x-0x%x.\n", ha->host_no, comp_status, | 1116 | "0x%x-0x%x.\n", vha->host_no, comp_status, |
1099 | scsi_status)); | 1117 | scsi_status)); |
1100 | 1118 | ||
1101 | /* | 1119 | /* |
@@ -1125,10 +1143,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1125 | if (!(scsi_status & SS_RESIDUAL_UNDER)) { | 1143 | if (!(scsi_status & SS_RESIDUAL_UNDER)) { |
1126 | DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " | 1144 | DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " |
1127 | "frame(s) detected (%x of %x bytes)..." | 1145 | "frame(s) detected (%x of %x bytes)..." |
1128 | "retrying command.\n", ha->host_no, | 1146 | "retrying command.\n", |
1129 | cp->device->channel, cp->device->id, | 1147 | vha->host_no, cp->device->channel, |
1130 | cp->device->lun, resid, | 1148 | cp->device->id, cp->device->lun, resid, |
1131 | scsi_bufflen(cp))); | 1149 | scsi_bufflen(cp))); |
1132 | 1150 | ||
1133 | cp->result = DID_BUS_BUSY << 16; | 1151 | cp->result = DID_BUS_BUSY << 16; |
1134 | break; | 1152 | break; |
@@ -1140,7 +1158,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1140 | qla_printk(KERN_INFO, ha, | 1158 | qla_printk(KERN_INFO, ha, |
1141 | "scsi(%ld:%d:%d:%d): Mid-layer underflow " | 1159 | "scsi(%ld:%d:%d:%d): Mid-layer underflow " |
1142 | "detected (%x of %x bytes)...returning " | 1160 | "detected (%x of %x bytes)...returning " |
1143 | "error status.\n", ha->host_no, | 1161 | "error status.\n", vha->host_no, |
1144 | cp->device->channel, cp->device->id, | 1162 | cp->device->channel, cp->device->id, |
1145 | cp->device->lun, resid, | 1163 | cp->device->lun, resid, |
1146 | scsi_bufflen(cp)); | 1164 | scsi_bufflen(cp)); |
@@ -1157,7 +1175,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1157 | case CS_DATA_OVERRUN: | 1175 | case CS_DATA_OVERRUN: |
1158 | DEBUG2(printk(KERN_INFO | 1176 | DEBUG2(printk(KERN_INFO |
1159 | "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", | 1177 | "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", |
1160 | ha->host_no, cp->device->id, cp->device->lun, comp_status, | 1178 | vha->host_no, cp->device->id, cp->device->lun, comp_status, |
1161 | scsi_status)); | 1179 | scsi_status)); |
1162 | DEBUG2(printk(KERN_INFO | 1180 | DEBUG2(printk(KERN_INFO |
1163 | "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", | 1181 | "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", |
@@ -1183,7 +1201,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1183 | */ | 1201 | */ |
1184 | DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " | 1202 | DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " |
1185 | "pid=%ld, compl status=0x%x, port state=0x%x\n", | 1203 | "pid=%ld, compl status=0x%x, port state=0x%x\n", |
1186 | ha->host_no, cp->device->id, cp->device->lun, | 1204 | vha->host_no, cp->device->id, cp->device->lun, |
1187 | cp->serial_number, comp_status, | 1205 | cp->serial_number, comp_status, |
1188 | atomic_read(&fcport->state))); | 1206 | atomic_read(&fcport->state))); |
1189 | 1207 | ||
@@ -1194,13 +1212,13 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1194 | */ | 1212 | */ |
1195 | cp->result = DID_TRANSPORT_DISRUPTED << 16; | 1213 | cp->result = DID_TRANSPORT_DISRUPTED << 16; |
1196 | if (atomic_read(&fcport->state) == FCS_ONLINE) | 1214 | if (atomic_read(&fcport->state) == FCS_ONLINE) |
1197 | qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); | 1215 | qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); |
1198 | break; | 1216 | break; |
1199 | 1217 | ||
1200 | case CS_RESET: | 1218 | case CS_RESET: |
1201 | DEBUG2(printk(KERN_INFO | 1219 | DEBUG2(printk(KERN_INFO |
1202 | "scsi(%ld): RESET status detected 0x%x-0x%x.\n", | 1220 | "scsi(%ld): RESET status detected 0x%x-0x%x.\n", |
1203 | ha->host_no, comp_status, scsi_status)); | 1221 | vha->host_no, comp_status, scsi_status)); |
1204 | 1222 | ||
1205 | cp->result = DID_RESET << 16; | 1223 | cp->result = DID_RESET << 16; |
1206 | break; | 1224 | break; |
@@ -1213,7 +1231,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1213 | */ | 1231 | */ |
1214 | DEBUG2(printk(KERN_INFO | 1232 | DEBUG2(printk(KERN_INFO |
1215 | "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", | 1233 | "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", |
1216 | ha->host_no, comp_status, scsi_status)); | 1234 | vha->host_no, comp_status, scsi_status)); |
1217 | 1235 | ||
1218 | cp->result = DID_RESET << 16; | 1236 | cp->result = DID_RESET << 16; |
1219 | break; | 1237 | break; |
@@ -1229,25 +1247,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1229 | if (IS_FWI2_CAPABLE(ha)) { | 1247 | if (IS_FWI2_CAPABLE(ha)) { |
1230 | DEBUG2(printk(KERN_INFO | 1248 | DEBUG2(printk(KERN_INFO |
1231 | "scsi(%ld:%d:%d:%d): TIMEOUT status detected " | 1249 | "scsi(%ld:%d:%d:%d): TIMEOUT status detected " |
1232 | "0x%x-0x%x\n", ha->host_no, cp->device->channel, | 1250 | "0x%x-0x%x\n", vha->host_no, cp->device->channel, |
1233 | cp->device->id, cp->device->lun, comp_status, | 1251 | cp->device->id, cp->device->lun, comp_status, |
1234 | scsi_status)); | 1252 | scsi_status)); |
1235 | break; | 1253 | break; |
1236 | } | 1254 | } |
1237 | DEBUG2(printk(KERN_INFO | 1255 | DEBUG2(printk(KERN_INFO |
1238 | "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " | 1256 | "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " |
1239 | "sflags=%x.\n", ha->host_no, cp->device->channel, | 1257 | "sflags=%x.\n", vha->host_no, cp->device->channel, |
1240 | cp->device->id, cp->device->lun, comp_status, scsi_status, | 1258 | cp->device->id, cp->device->lun, comp_status, scsi_status, |
1241 | le16_to_cpu(sts->status_flags))); | 1259 | le16_to_cpu(sts->status_flags))); |
1242 | 1260 | ||
1243 | /* Check to see if logout occurred. */ | 1261 | /* Check to see if logout occurred. */ |
1244 | if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) | 1262 | if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) |
1245 | qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); | 1263 | qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); |
1246 | break; | 1264 | break; |
1247 | 1265 | ||
1248 | default: | 1266 | default: |
1249 | DEBUG3(printk("scsi(%ld): Error detected (unknown status) " | 1267 | DEBUG3(printk("scsi(%ld): Error detected (unknown status) " |
1250 | "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status)); | 1268 | "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status)); |
1251 | qla_printk(KERN_INFO, ha, | 1269 | qla_printk(KERN_INFO, ha, |
1252 | "Unknown status detected 0x%x-0x%x.\n", | 1270 | "Unknown status detected 0x%x-0x%x.\n", |
1253 | comp_status, scsi_status); | 1271 | comp_status, scsi_status); |
@@ -1257,8 +1275,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1257 | } | 1275 | } |
1258 | 1276 | ||
1259 | /* Place command on done queue. */ | 1277 | /* Place command on done queue. */ |
1260 | if (ha->status_srb == NULL) | 1278 | if (vha->status_srb == NULL) |
1261 | qla2x00_sp_compl(ha, sp); | 1279 | qla2x00_sp_compl(vha, sp); |
1262 | } | 1280 | } |
1263 | 1281 | ||
1264 | /** | 1282 | /** |
@@ -1269,10 +1287,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1269 | * Extended sense data. | 1287 | * Extended sense data. |
1270 | */ | 1288 | */ |
1271 | static void | 1289 | static void |
1272 | qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) | 1290 | qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) |
1273 | { | 1291 | { |
1274 | uint8_t sense_sz = 0; | 1292 | uint8_t sense_sz = 0; |
1275 | srb_t *sp = ha->status_srb; | 1293 | struct qla_hw_data *ha = vha->hw; |
1294 | srb_t *sp = vha->status_srb; | ||
1276 | struct scsi_cmnd *cp; | 1295 | struct scsi_cmnd *cp; |
1277 | 1296 | ||
1278 | if (sp != NULL && sp->request_sense_length != 0) { | 1297 | if (sp != NULL && sp->request_sense_length != 0) { |
@@ -1284,7 +1303,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) | |||
1284 | "cmd is NULL: already returned to OS (sp=%p)\n", | 1303 | "cmd is NULL: already returned to OS (sp=%p)\n", |
1285 | sp); | 1304 | sp); |
1286 | 1305 | ||
1287 | ha->status_srb = NULL; | 1306 | vha->status_srb = NULL; |
1288 | return; | 1307 | return; |
1289 | } | 1308 | } |
1290 | 1309 | ||
@@ -1305,8 +1324,8 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) | |||
1305 | 1324 | ||
1306 | /* Place command on done queue. */ | 1325 | /* Place command on done queue. */ |
1307 | if (sp->request_sense_length == 0) { | 1326 | if (sp->request_sense_length == 0) { |
1308 | ha->status_srb = NULL; | 1327 | vha->status_srb = NULL; |
1309 | qla2x00_sp_compl(ha, sp); | 1328 | qla2x00_sp_compl(vha, sp); |
1310 | } | 1329 | } |
1311 | } | 1330 | } |
1312 | } | 1331 | } |
@@ -1317,10 +1336,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) | |||
1317 | * @pkt: Entry pointer | 1336 | * @pkt: Entry pointer |
1318 | */ | 1337 | */ |
1319 | static void | 1338 | static void |
1320 | qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) | 1339 | qla2x00_error_entry(scsi_qla_host_t *vha, sts_entry_t *pkt) |
1321 | { | 1340 | { |
1322 | srb_t *sp; | 1341 | srb_t *sp; |
1323 | 1342 | struct qla_hw_data *ha = vha->hw; | |
1343 | struct req_que *req = ha->req; | ||
1324 | #if defined(QL_DEBUG_LEVEL_2) | 1344 | #if defined(QL_DEBUG_LEVEL_2) |
1325 | if (pkt->entry_status & RF_INV_E_ORDER) | 1345 | if (pkt->entry_status & RF_INV_E_ORDER) |
1326 | qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); | 1346 | qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); |
@@ -1339,13 +1359,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) | |||
1339 | 1359 | ||
1340 | /* Validate handle. */ | 1360 | /* Validate handle. */ |
1341 | if (pkt->handle < MAX_OUTSTANDING_COMMANDS) | 1361 | if (pkt->handle < MAX_OUTSTANDING_COMMANDS) |
1342 | sp = ha->outstanding_cmds[pkt->handle]; | 1362 | sp = req->outstanding_cmds[pkt->handle]; |
1343 | else | 1363 | else |
1344 | sp = NULL; | 1364 | sp = NULL; |
1345 | 1365 | ||
1346 | if (sp) { | 1366 | if (sp) { |
1347 | /* Free outstanding command slot. */ | 1367 | /* Free outstanding command slot. */ |
1348 | ha->outstanding_cmds[pkt->handle] = NULL; | 1368 | req->outstanding_cmds[pkt->handle] = NULL; |
1349 | 1369 | ||
1350 | /* Bad payload or header */ | 1370 | /* Bad payload or header */ |
1351 | if (pkt->entry_status & | 1371 | if (pkt->entry_status & |
@@ -1357,17 +1377,17 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) | |||
1357 | } else { | 1377 | } else { |
1358 | sp->cmd->result = DID_ERROR << 16; | 1378 | sp->cmd->result = DID_ERROR << 16; |
1359 | } | 1379 | } |
1360 | qla2x00_sp_compl(ha, sp); | 1380 | qla2x00_sp_compl(vha, sp); |
1361 | 1381 | ||
1362 | } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == | 1382 | } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == |
1363 | COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { | 1383 | COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { |
1364 | DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", | 1384 | DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", |
1365 | ha->host_no)); | 1385 | vha->host_no)); |
1366 | qla_printk(KERN_WARNING, ha, | 1386 | qla_printk(KERN_WARNING, ha, |
1367 | "Error entry - invalid handle\n"); | 1387 | "Error entry - invalid handle\n"); |
1368 | 1388 | ||
1369 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 1389 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
1370 | qla2xxx_wake_dpc(ha); | 1390 | qla2xxx_wake_dpc(vha); |
1371 | } | 1391 | } |
1372 | } | 1392 | } |
1373 | 1393 | ||
@@ -1377,10 +1397,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) | |||
1377 | * @mb0: Mailbox0 register | 1397 | * @mb0: Mailbox0 register |
1378 | */ | 1398 | */ |
1379 | static void | 1399 | static void |
1380 | qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) | 1400 | qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) |
1381 | { | 1401 | { |
1382 | uint16_t cnt; | 1402 | uint16_t cnt; |
1383 | uint16_t __iomem *wptr; | 1403 | uint16_t __iomem *wptr; |
1404 | struct qla_hw_data *ha = vha->hw; | ||
1384 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 1405 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
1385 | 1406 | ||
1386 | /* Load return mailbox registers. */ | 1407 | /* Load return mailbox registers. */ |
@@ -1395,10 +1416,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) | |||
1395 | 1416 | ||
1396 | if (ha->mcp) { | 1417 | if (ha->mcp) { |
1397 | DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", | 1418 | DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", |
1398 | __func__, ha->host_no, ha->mcp->mb[0])); | 1419 | __func__, vha->host_no, ha->mcp->mb[0])); |
1399 | } else { | 1420 | } else { |
1400 | DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", | 1421 | DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", |
1401 | __func__, ha->host_no)); | 1422 | __func__, vha->host_no)); |
1402 | } | 1423 | } |
1403 | } | 1424 | } |
1404 | 1425 | ||
@@ -1407,30 +1428,32 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) | |||
1407 | * @ha: SCSI driver HA context | 1428 | * @ha: SCSI driver HA context |
1408 | */ | 1429 | */ |
1409 | void | 1430 | void |
1410 | qla24xx_process_response_queue(struct scsi_qla_host *ha) | 1431 | qla24xx_process_response_queue(struct scsi_qla_host *vha) |
1411 | { | 1432 | { |
1433 | struct qla_hw_data *ha = vha->hw; | ||
1412 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 1434 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
1413 | struct sts_entry_24xx *pkt; | 1435 | struct sts_entry_24xx *pkt; |
1436 | struct rsp_que *rsp = ha->rsp; | ||
1414 | 1437 | ||
1415 | if (!ha->flags.online) | 1438 | if (!vha->flags.online) |
1416 | return; | 1439 | return; |
1417 | 1440 | ||
1418 | while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { | 1441 | while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { |
1419 | pkt = (struct sts_entry_24xx *)ha->response_ring_ptr; | 1442 | pkt = (struct sts_entry_24xx *)rsp->ring_ptr; |
1420 | 1443 | ||
1421 | ha->rsp_ring_index++; | 1444 | rsp->ring_index++; |
1422 | if (ha->rsp_ring_index == ha->response_q_length) { | 1445 | if (rsp->ring_index == rsp->length) { |
1423 | ha->rsp_ring_index = 0; | 1446 | rsp->ring_index = 0; |
1424 | ha->response_ring_ptr = ha->response_ring; | 1447 | rsp->ring_ptr = rsp->ring; |
1425 | } else { | 1448 | } else { |
1426 | ha->response_ring_ptr++; | 1449 | rsp->ring_ptr++; |
1427 | } | 1450 | } |
1428 | 1451 | ||
1429 | if (pkt->entry_status != 0) { | 1452 | if (pkt->entry_status != 0) { |
1430 | DEBUG3(printk(KERN_INFO | 1453 | DEBUG3(printk(KERN_INFO |
1431 | "scsi(%ld): Process error entry.\n", ha->host_no)); | 1454 | "scsi(%ld): Process error entry.\n", vha->host_no)); |
1432 | 1455 | ||
1433 | qla2x00_error_entry(ha, (sts_entry_t *) pkt); | 1456 | qla2x00_error_entry(vha, (sts_entry_t *) pkt); |
1434 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 1457 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
1435 | wmb(); | 1458 | wmb(); |
1436 | continue; | 1459 | continue; |
@@ -1438,13 +1461,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha) | |||
1438 | 1461 | ||
1439 | switch (pkt->entry_type) { | 1462 | switch (pkt->entry_type) { |
1440 | case STATUS_TYPE: | 1463 | case STATUS_TYPE: |
1441 | qla2x00_status_entry(ha, pkt); | 1464 | qla2x00_status_entry(vha, pkt); |
1442 | break; | 1465 | break; |
1443 | case STATUS_CONT_TYPE: | 1466 | case STATUS_CONT_TYPE: |
1444 | qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); | 1467 | qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); |
1445 | break; | 1468 | break; |
1446 | case VP_RPT_ID_IOCB_TYPE: | 1469 | case VP_RPT_ID_IOCB_TYPE: |
1447 | qla24xx_report_id_acquisition(ha, | 1470 | qla24xx_report_id_acquisition(vha, |
1448 | (struct vp_rpt_id_entry_24xx *)pkt); | 1471 | (struct vp_rpt_id_entry_24xx *)pkt); |
1449 | break; | 1472 | break; |
1450 | default: | 1473 | default: |
@@ -1452,7 +1475,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha) | |||
1452 | DEBUG4(printk(KERN_WARNING | 1475 | DEBUG4(printk(KERN_WARNING |
1453 | "scsi(%ld): Received unknown response pkt type %x " | 1476 | "scsi(%ld): Received unknown response pkt type %x " |
1454 | "entry status=%x.\n", | 1477 | "entry status=%x.\n", |
1455 | ha->host_no, pkt->entry_type, pkt->entry_status)); | 1478 | vha->host_no, pkt->entry_type, pkt->entry_status)); |
1456 | break; | 1479 | break; |
1457 | } | 1480 | } |
1458 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 1481 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
@@ -1460,14 +1483,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha) | |||
1460 | } | 1483 | } |
1461 | 1484 | ||
1462 | /* Adjust ring index */ | 1485 | /* Adjust ring index */ |
1463 | WRT_REG_DWORD(®->rsp_q_out, ha->rsp_ring_index); | 1486 | WRT_REG_DWORD(®->rsp_q_out, rsp->ring_index); |
1464 | } | 1487 | } |
1465 | 1488 | ||
1466 | static void | 1489 | static void |
1467 | qla2xxx_check_risc_status(scsi_qla_host_t *ha) | 1490 | qla2xxx_check_risc_status(scsi_qla_host_t *vha) |
1468 | { | 1491 | { |
1469 | int rval; | 1492 | int rval; |
1470 | uint32_t cnt; | 1493 | uint32_t cnt; |
1494 | struct qla_hw_data *ha = vha->hw; | ||
1471 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 1495 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
1472 | 1496 | ||
1473 | if (!IS_QLA25XX(ha)) | 1497 | if (!IS_QLA25XX(ha)) |
@@ -1521,25 +1545,29 @@ done: | |||
1521 | irqreturn_t | 1545 | irqreturn_t |
1522 | qla24xx_intr_handler(int irq, void *dev_id) | 1546 | qla24xx_intr_handler(int irq, void *dev_id) |
1523 | { | 1547 | { |
1524 | scsi_qla_host_t *ha; | 1548 | scsi_qla_host_t *vha; |
1549 | struct qla_hw_data *ha; | ||
1525 | struct device_reg_24xx __iomem *reg; | 1550 | struct device_reg_24xx __iomem *reg; |
1526 | int status; | 1551 | int status; |
1527 | unsigned long iter; | 1552 | unsigned long iter; |
1528 | uint32_t stat; | 1553 | uint32_t stat; |
1529 | uint32_t hccr; | 1554 | uint32_t hccr; |
1530 | uint16_t mb[4]; | 1555 | uint16_t mb[4]; |
1556 | struct rsp_que *rsp; | ||
1531 | 1557 | ||
1532 | ha = (scsi_qla_host_t *) dev_id; | 1558 | rsp = (struct rsp_que *) dev_id; |
1533 | if (!ha) { | 1559 | if (!rsp) { |
1534 | printk(KERN_INFO | 1560 | printk(KERN_INFO |
1535 | "%s(): NULL host pointer\n", __func__); | 1561 | "%s(): NULL response queue pointer\n", __func__); |
1536 | return IRQ_NONE; | 1562 | return IRQ_NONE; |
1537 | } | 1563 | } |
1538 | 1564 | ||
1565 | ha = rsp->hw; | ||
1539 | reg = &ha->iobase->isp24; | 1566 | reg = &ha->iobase->isp24; |
1540 | status = 0; | 1567 | status = 0; |
1541 | 1568 | ||
1542 | spin_lock(&ha->hardware_lock); | 1569 | spin_lock(&ha->hardware_lock); |
1570 | vha = qla2x00_get_rsp_host(rsp); | ||
1543 | for (iter = 50; iter--; ) { | 1571 | for (iter = 50; iter--; ) { |
1544 | stat = RD_REG_DWORD(®->host_status); | 1572 | stat = RD_REG_DWORD(®->host_status); |
1545 | if (stat & HSRX_RISC_PAUSED) { | 1573 | if (stat & HSRX_RISC_PAUSED) { |
@@ -1547,7 +1575,7 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1547 | break; | 1575 | break; |
1548 | 1576 | ||
1549 | if (ha->hw_event_pause_errors == 0) | 1577 | if (ha->hw_event_pause_errors == 0) |
1550 | qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, | 1578 | qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR, |
1551 | 0, MSW(stat), LSW(stat)); | 1579 | 0, MSW(stat), LSW(stat)); |
1552 | else if (ha->hw_event_pause_errors < 0xffffffff) | 1580 | else if (ha->hw_event_pause_errors < 0xffffffff) |
1553 | ha->hw_event_pause_errors++; | 1581 | ha->hw_event_pause_errors++; |
@@ -1557,10 +1585,10 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1557 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " | 1585 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " |
1558 | "Dumping firmware!\n", hccr); | 1586 | "Dumping firmware!\n", hccr); |
1559 | 1587 | ||
1560 | qla2xxx_check_risc_status(ha); | 1588 | qla2xxx_check_risc_status(vha); |
1561 | 1589 | ||
1562 | ha->isp_ops->fw_dump(ha, 1); | 1590 | ha->isp_ops->fw_dump(vha, 1); |
1563 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 1591 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
1564 | break; | 1592 | break; |
1565 | } else if ((stat & HSRX_RISC_INT) == 0) | 1593 | } else if ((stat & HSRX_RISC_INT) == 0) |
1566 | break; | 1594 | break; |
@@ -1570,7 +1598,7 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1570 | case 0x2: | 1598 | case 0x2: |
1571 | case 0x10: | 1599 | case 0x10: |
1572 | case 0x11: | 1600 | case 0x11: |
1573 | qla24xx_mbx_completion(ha, MSW(stat)); | 1601 | qla24xx_mbx_completion(vha, MSW(stat)); |
1574 | status |= MBX_INTERRUPT; | 1602 | status |= MBX_INTERRUPT; |
1575 | 1603 | ||
1576 | break; | 1604 | break; |
@@ -1579,15 +1607,15 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1579 | mb[1] = RD_REG_WORD(®->mailbox1); | 1607 | mb[1] = RD_REG_WORD(®->mailbox1); |
1580 | mb[2] = RD_REG_WORD(®->mailbox2); | 1608 | mb[2] = RD_REG_WORD(®->mailbox2); |
1581 | mb[3] = RD_REG_WORD(®->mailbox3); | 1609 | mb[3] = RD_REG_WORD(®->mailbox3); |
1582 | qla2x00_async_event(ha, mb); | 1610 | qla2x00_async_event(vha, mb); |
1583 | break; | 1611 | break; |
1584 | case 0x13: | 1612 | case 0x13: |
1585 | qla24xx_process_response_queue(ha); | 1613 | qla24xx_process_response_queue(vha); |
1586 | break; | 1614 | break; |
1587 | default: | 1615 | default: |
1588 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " | 1616 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " |
1589 | "(%d).\n", | 1617 | "(%d).\n", |
1590 | ha->host_no, stat & 0xff)); | 1618 | vha->host_no, stat & 0xff)); |
1591 | break; | 1619 | break; |
1592 | } | 1620 | } |
1593 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1621 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
@@ -1607,15 +1635,24 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1607 | static irqreturn_t | 1635 | static irqreturn_t |
1608 | qla24xx_msix_rsp_q(int irq, void *dev_id) | 1636 | qla24xx_msix_rsp_q(int irq, void *dev_id) |
1609 | { | 1637 | { |
1610 | scsi_qla_host_t *ha; | 1638 | scsi_qla_host_t *vha; |
1639 | struct qla_hw_data *ha; | ||
1640 | struct rsp_que *rsp; | ||
1611 | struct device_reg_24xx __iomem *reg; | 1641 | struct device_reg_24xx __iomem *reg; |
1612 | 1642 | ||
1613 | ha = dev_id; | 1643 | rsp = (struct rsp_que *) dev_id; |
1644 | if (!rsp) { | ||
1645 | printk(KERN_INFO | ||
1646 | "%s(): NULL response queue pointer\n", __func__); | ||
1647 | return IRQ_NONE; | ||
1648 | } | ||
1649 | ha = rsp->hw; | ||
1614 | reg = &ha->iobase->isp24; | 1650 | reg = &ha->iobase->isp24; |
1615 | 1651 | ||
1616 | spin_lock_irq(&ha->hardware_lock); | 1652 | spin_lock_irq(&ha->hardware_lock); |
1617 | 1653 | ||
1618 | qla24xx_process_response_queue(ha); | 1654 | vha = qla2x00_get_rsp_host(rsp); |
1655 | qla24xx_process_response_queue(vha); | ||
1619 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1656 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1620 | 1657 | ||
1621 | spin_unlock_irq(&ha->hardware_lock); | 1658 | spin_unlock_irq(&ha->hardware_lock); |
@@ -1626,18 +1663,27 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
1626 | static irqreturn_t | 1663 | static irqreturn_t |
1627 | qla24xx_msix_default(int irq, void *dev_id) | 1664 | qla24xx_msix_default(int irq, void *dev_id) |
1628 | { | 1665 | { |
1629 | scsi_qla_host_t *ha; | 1666 | scsi_qla_host_t *vha; |
1667 | struct qla_hw_data *ha; | ||
1668 | struct rsp_que *rsp; | ||
1630 | struct device_reg_24xx __iomem *reg; | 1669 | struct device_reg_24xx __iomem *reg; |
1631 | int status; | 1670 | int status; |
1632 | uint32_t stat; | 1671 | uint32_t stat; |
1633 | uint32_t hccr; | 1672 | uint32_t hccr; |
1634 | uint16_t mb[4]; | 1673 | uint16_t mb[4]; |
1635 | 1674 | ||
1636 | ha = dev_id; | 1675 | rsp = (struct rsp_que *) dev_id; |
1676 | if (!rsp) { | ||
1677 | DEBUG(printk( | ||
1678 | "%s(): NULL response queue pointer\n", __func__)); | ||
1679 | return IRQ_NONE; | ||
1680 | } | ||
1681 | ha = rsp->hw; | ||
1637 | reg = &ha->iobase->isp24; | 1682 | reg = &ha->iobase->isp24; |
1638 | status = 0; | 1683 | status = 0; |
1639 | 1684 | ||
1640 | spin_lock_irq(&ha->hardware_lock); | 1685 | spin_lock_irq(&ha->hardware_lock); |
1686 | vha = qla2x00_get_rsp_host(rsp); | ||
1641 | do { | 1687 | do { |
1642 | stat = RD_REG_DWORD(®->host_status); | 1688 | stat = RD_REG_DWORD(®->host_status); |
1643 | if (stat & HSRX_RISC_PAUSED) { | 1689 | if (stat & HSRX_RISC_PAUSED) { |
@@ -1645,7 +1691,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1645 | break; | 1691 | break; |
1646 | 1692 | ||
1647 | if (ha->hw_event_pause_errors == 0) | 1693 | if (ha->hw_event_pause_errors == 0) |
1648 | qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, | 1694 | qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR, |
1649 | 0, MSW(stat), LSW(stat)); | 1695 | 0, MSW(stat), LSW(stat)); |
1650 | else if (ha->hw_event_pause_errors < 0xffffffff) | 1696 | else if (ha->hw_event_pause_errors < 0xffffffff) |
1651 | ha->hw_event_pause_errors++; | 1697 | ha->hw_event_pause_errors++; |
@@ -1655,10 +1701,10 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1655 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " | 1701 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " |
1656 | "Dumping firmware!\n", hccr); | 1702 | "Dumping firmware!\n", hccr); |
1657 | 1703 | ||
1658 | qla2xxx_check_risc_status(ha); | 1704 | qla2xxx_check_risc_status(vha); |
1659 | 1705 | ||
1660 | ha->isp_ops->fw_dump(ha, 1); | 1706 | ha->isp_ops->fw_dump(vha, 1); |
1661 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 1707 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
1662 | break; | 1708 | break; |
1663 | } else if ((stat & HSRX_RISC_INT) == 0) | 1709 | } else if ((stat & HSRX_RISC_INT) == 0) |
1664 | break; | 1710 | break; |
@@ -1668,7 +1714,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1668 | case 0x2: | 1714 | case 0x2: |
1669 | case 0x10: | 1715 | case 0x10: |
1670 | case 0x11: | 1716 | case 0x11: |
1671 | qla24xx_mbx_completion(ha, MSW(stat)); | 1717 | qla24xx_mbx_completion(vha, MSW(stat)); |
1672 | status |= MBX_INTERRUPT; | 1718 | status |= MBX_INTERRUPT; |
1673 | 1719 | ||
1674 | break; | 1720 | break; |
@@ -1677,15 +1723,15 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1677 | mb[1] = RD_REG_WORD(®->mailbox1); | 1723 | mb[1] = RD_REG_WORD(®->mailbox1); |
1678 | mb[2] = RD_REG_WORD(®->mailbox2); | 1724 | mb[2] = RD_REG_WORD(®->mailbox2); |
1679 | mb[3] = RD_REG_WORD(®->mailbox3); | 1725 | mb[3] = RD_REG_WORD(®->mailbox3); |
1680 | qla2x00_async_event(ha, mb); | 1726 | qla2x00_async_event(vha, mb); |
1681 | break; | 1727 | break; |
1682 | case 0x13: | 1728 | case 0x13: |
1683 | qla24xx_process_response_queue(ha); | 1729 | qla24xx_process_response_queue(vha); |
1684 | break; | 1730 | break; |
1685 | default: | 1731 | default: |
1686 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " | 1732 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " |
1687 | "(%d).\n", | 1733 | "(%d).\n", |
1688 | ha->host_no, stat & 0xff)); | 1734 | vha->host_no, stat & 0xff)); |
1689 | break; | 1735 | break; |
1690 | } | 1736 | } |
1691 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1737 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
@@ -1719,23 +1765,25 @@ static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { | |||
1719 | }; | 1765 | }; |
1720 | 1766 | ||
1721 | static void | 1767 | static void |
1722 | qla24xx_disable_msix(scsi_qla_host_t *ha) | 1768 | qla24xx_disable_msix(struct qla_hw_data *ha) |
1723 | { | 1769 | { |
1724 | int i; | 1770 | int i; |
1725 | struct qla_msix_entry *qentry; | 1771 | struct qla_msix_entry *qentry; |
1772 | struct rsp_que *rsp = ha->rsp; | ||
1726 | 1773 | ||
1727 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) { | 1774 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) { |
1728 | qentry = &ha->msix_entries[imsix_entries[i].index]; | 1775 | qentry = &ha->msix_entries[imsix_entries[i].index]; |
1729 | if (qentry->have_irq) | 1776 | if (qentry->have_irq) |
1730 | free_irq(qentry->msix_vector, ha); | 1777 | free_irq(qentry->msix_vector, rsp); |
1731 | } | 1778 | } |
1732 | pci_disable_msix(ha->pdev); | 1779 | pci_disable_msix(ha->pdev); |
1733 | } | 1780 | } |
1734 | 1781 | ||
1735 | static int | 1782 | static int |
1736 | qla24xx_enable_msix(scsi_qla_host_t *ha) | 1783 | qla24xx_enable_msix(struct qla_hw_data *ha) |
1737 | { | 1784 | { |
1738 | int i, ret; | 1785 | int i, ret; |
1786 | struct rsp_que *rsp = ha->rsp; | ||
1739 | struct msix_entry entries[QLA_MSIX_ENTRIES]; | 1787 | struct msix_entry entries[QLA_MSIX_ENTRIES]; |
1740 | struct qla_msix_entry *qentry; | 1788 | struct qla_msix_entry *qentry; |
1741 | 1789 | ||
@@ -1757,7 +1805,7 @@ qla24xx_enable_msix(scsi_qla_host_t *ha) | |||
1757 | qentry->msix_entry = entries[i].entry; | 1805 | qentry->msix_entry = entries[i].entry; |
1758 | qentry->have_irq = 0; | 1806 | qentry->have_irq = 0; |
1759 | ret = request_irq(qentry->msix_vector, | 1807 | ret = request_irq(qentry->msix_vector, |
1760 | imsix_entries[i].handler, 0, imsix_entries[i].name, ha); | 1808 | imsix_entries[i].handler, 0, imsix_entries[i].name, rsp); |
1761 | if (ret) { | 1809 | if (ret) { |
1762 | qla_printk(KERN_WARNING, ha, | 1810 | qla_printk(KERN_WARNING, ha, |
1763 | "MSI-X: Unable to register handler -- %x/%d.\n", | 1811 | "MSI-X: Unable to register handler -- %x/%d.\n", |
@@ -1773,20 +1821,21 @@ msix_out: | |||
1773 | } | 1821 | } |
1774 | 1822 | ||
1775 | int | 1823 | int |
1776 | qla2x00_request_irqs(scsi_qla_host_t *ha) | 1824 | qla2x00_request_irqs(struct qla_hw_data *ha) |
1777 | { | 1825 | { |
1778 | int ret; | 1826 | int ret; |
1779 | device_reg_t __iomem *reg = ha->iobase; | 1827 | device_reg_t __iomem *reg = ha->iobase; |
1828 | struct rsp_que *rsp = ha->rsp; | ||
1780 | 1829 | ||
1781 | /* If possible, enable MSI-X. */ | 1830 | /* If possible, enable MSI-X. */ |
1782 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) | 1831 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) |
1783 | goto skip_msix; | 1832 | goto skip_msix; |
1784 | 1833 | ||
1785 | if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || | 1834 | if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || |
1786 | !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { | 1835 | !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { |
1787 | DEBUG2(qla_printk(KERN_WARNING, ha, | 1836 | DEBUG2(qla_printk(KERN_WARNING, ha, |
1788 | "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", | 1837 | "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", |
1789 | ha->chip_revision, ha->fw_attributes)); | 1838 | ha->pdev->revision, ha->fw_attributes)); |
1790 | 1839 | ||
1791 | goto skip_msix; | 1840 | goto skip_msix; |
1792 | } | 1841 | } |
@@ -1825,7 +1874,7 @@ skip_msix: | |||
1825 | skip_msi: | 1874 | skip_msi: |
1826 | 1875 | ||
1827 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, | 1876 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, |
1828 | IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); | 1877 | IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); |
1829 | if (ret) { | 1878 | if (ret) { |
1830 | qla_printk(KERN_WARNING, ha, | 1879 | qla_printk(KERN_WARNING, ha, |
1831 | "Failed to reserve interrupt %d already in use.\n", | 1880 | "Failed to reserve interrupt %d already in use.\n", |
@@ -1833,10 +1882,8 @@ skip_msi: | |||
1833 | goto fail; | 1882 | goto fail; |
1834 | } | 1883 | } |
1835 | ha->flags.inta_enabled = 1; | 1884 | ha->flags.inta_enabled = 1; |
1836 | ha->host->irq = ha->pdev->irq; | ||
1837 | clear_risc_ints: | 1885 | clear_risc_ints: |
1838 | 1886 | ||
1839 | ha->isp_ops->disable_intrs(ha); | ||
1840 | spin_lock_irq(&ha->hardware_lock); | 1887 | spin_lock_irq(&ha->hardware_lock); |
1841 | if (IS_FWI2_CAPABLE(ha)) { | 1888 | if (IS_FWI2_CAPABLE(ha)) { |
1842 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); | 1889 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); |
@@ -1853,13 +1900,35 @@ fail: | |||
1853 | } | 1900 | } |
1854 | 1901 | ||
1855 | void | 1902 | void |
1856 | qla2x00_free_irqs(scsi_qla_host_t *ha) | 1903 | qla2x00_free_irqs(scsi_qla_host_t *vha) |
1857 | { | 1904 | { |
1905 | struct qla_hw_data *ha = vha->hw; | ||
1906 | struct rsp_que *rsp = ha->rsp; | ||
1858 | 1907 | ||
1859 | if (ha->flags.msix_enabled) | 1908 | if (ha->flags.msix_enabled) |
1860 | qla24xx_disable_msix(ha); | 1909 | qla24xx_disable_msix(ha); |
1861 | else if (ha->flags.inta_enabled) { | 1910 | else if (ha->flags.inta_enabled) { |
1862 | free_irq(ha->host->irq, ha); | 1911 | free_irq(ha->pdev->irq, rsp); |
1863 | pci_disable_msi(ha->pdev); | 1912 | pci_disable_msi(ha->pdev); |
1864 | } | 1913 | } |
1865 | } | 1914 | } |
1915 | |||
1916 | static struct scsi_qla_host * | ||
1917 | qla2x00_get_rsp_host(struct rsp_que *rsp) | ||
1918 | { | ||
1919 | srb_t *sp; | ||
1920 | struct qla_hw_data *ha = rsp->hw; | ||
1921 | struct scsi_qla_host *vha = NULL; | ||
1922 | struct sts_entry_24xx *pkt = (struct sts_entry_24xx *) rsp->ring_ptr; | ||
1923 | |||
1924 | if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { | ||
1925 | sp = ha->req->outstanding_cmds[pkt->handle]; | ||
1926 | if (sp) | ||
1927 | vha = sp->vha; | ||
1928 | } | ||
1929 | if (!vha) | ||
1930 | /* Invalid entry, handle it in base queue */ | ||
1931 | vha = pci_get_drvdata(ha->pdev); | ||
1932 | |||
1933 | return vha; | ||
1934 | } | ||
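
The new qla2x00_get_rsp_host() helper added at the end of this patch resolves which (virtual) host a response-queue entry belongs to: it looks up the srb through the packet handle in the request queue's outstanding_cmds array and takes sp->vha, and falls back to the base host stored in the PCI driver data when the handle or srb is invalid. The stand-alone C sketch below models only that lookup order; the mini_* types are deliberately simplified, hypothetical stand-ins for illustration, not the real qla2xxx structures.

```c
/* Minimal, self-contained model of the rsp_que -> vha lookup performed by
 * qla2x00_get_rsp_host().  All types here are simplified stand-ins used for
 * illustration only; they are not the real qla2xxx driver structures. */
#include <stdio.h>
#include <stddef.h>

#define MINI_MAX_OUTSTANDING 4

struct mini_vha { const char *name; };

struct mini_srb { struct mini_vha *vha; };

struct mini_req_que { struct mini_srb *outstanding_cmds[MINI_MAX_OUTSTANDING]; };

struct mini_hw {
	struct mini_req_que *req;
	struct mini_vha *base_vha;	/* stands in for pci_get_drvdata(ha->pdev) */
};

struct mini_pkt { unsigned int handle; };

struct mini_rsp_que {
	struct mini_hw *hw;
	struct mini_pkt *ring_ptr;
};

/* Mirrors the lookup order: valid handle -> owning vha, otherwise base vha. */
static struct mini_vha *mini_get_rsp_host(struct mini_rsp_que *rsp)
{
	struct mini_hw *hw = rsp->hw;
	struct mini_pkt *pkt = rsp->ring_ptr;
	struct mini_vha *vha = NULL;

	if (pkt && pkt->handle < MINI_MAX_OUTSTANDING) {
		struct mini_srb *sp = hw->req->outstanding_cmds[pkt->handle];
		if (sp)
			vha = sp->vha;
	}
	if (!vha)
		vha = hw->base_vha;	/* invalid entry: handle it on the base queue */
	return vha;
}

int main(void)
{
	struct mini_vha base = { "base" }, vport = { "vport2" };
	struct mini_srb sp = { &vport };
	struct mini_req_que req = { { NULL, &sp, NULL, NULL } };
	struct mini_hw hw = { &req, &base };
	struct mini_pkt good = { 1 }, bad = { 99 };
	struct mini_rsp_que rsp = { &hw, &good };

	printf("handle 1 -> %s\n", mini_get_rsp_host(&rsp)->name);	/* vport2 */
	rsp.ring_ptr = &bad;
	printf("handle 99 -> %s\n", mini_get_rsp_host(&rsp)->name);	/* base */
	return 0;
}
```

The same resolution step is what the refactored interrupt handlers in this patch rely on: they now receive a struct rsp_que * as dev_id, take the hardware context from rsp->hw, and call qla2x00_get_rsp_host() under the hardware lock to pick the vha before processing the ring.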