Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
 drivers/scsi/qla2xxx/qla_iocb.c | 358 ++++++++++++++++++++++++---------------
 1 file changed, 214 insertions(+), 144 deletions(-)
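
For orientation before the hunks: this patch retires the per-host ring fields (ha->request_ring, ha->req_ring_index, ha->req_q_cnt, ...) in favour of per-queue objects, so one qla_hw_data can drive several request/response rings. The sketch below is a hypothetical reduction of struct req_que to just the members this diff touches; the real definition (in qla_def.h, not part of this diff) is larger.

/* Hypothetical, minimal view of the per-queue state introduced by this
 * patch; every field name below appears in the hunks that follow. */
struct req_que {
	request_t	*ring;		/* base of the IOCB ring (was ha->request_ring) */
	request_t	*ring_ptr;	/* current slot (was ha->request_ring_ptr) */
	uint16_t	ring_index;	/* producer index (was ha->req_ring_index) */
	uint16_t	length;		/* entries in the ring (was ha->request_q_length) */
	uint16_t	cnt;		/* free entries (was ha->req_q_cnt) */
	uint16_t	id;		/* queue number, used by ISP_QUE_REG() */
	uint32_t	current_outstanding_cmd;
	srb_t		*outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	struct rsp_que	*rsp;		/* paired response queue, if bound */
};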
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..5bedc9d05942 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
 
 #include <scsi/scsi_tcq.h>
 
-static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
-static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
+static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
+						struct rsp_que *rsp);
+static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
 
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
 	/* Set transfer direction */
 	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cflags = CF_WRITE;
-		sp->fcport->ha->qla_stats.output_bytes +=
+		sp->fcport->vha->hw->qla_stats.output_bytes +=
 		    scsi_bufflen(sp->cmd);
 	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cflags = CF_READ;
-		sp->fcport->ha->qla_stats.input_bytes +=
+		sp->fcport->vha->hw->qla_stats.input_bytes +=
 		    scsi_bufflen(sp->cmd);
 	}
 	return (cflags);
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
  * Returns a pointer to the Continuation Type 0 IOCB packet.
  */
 static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
 {
 	cont_entry_t *cont_pkt;
-
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else {
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 	}
 
-	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
+	cont_pkt = (cont_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
 	*((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
 {
 	cont_a64_entry_t *cont_pkt;
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else {
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 	}
 
-	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
+	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
 	*((uint32_t *)(&cont_pkt->entry_type)) =
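
Both continuation helpers above (and the start_scsi/isp_cmd hunks below) repeat one advance-and-wrap idiom on the new per-queue struct. Pulled out as a standalone sketch (hypothetical helper name, not part of the patch):

/* Sketch only: advance the producer side of a request ring, wrapping
 * both the index and the slot pointer at the end of the ring. */
static inline request_t *req_ring_next(struct req_que *req)
{
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;		/* wrap the index... */
		req->ring_ptr = req->ring;	/* ...and the slot pointer */
	} else {
		req->ring_ptr++;
	}
	return req->ring_ptr;
}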
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 {
 	uint16_t avail_dsds;
 	uint32_t *cur_dsd;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct req_que *req;
 
 	cmd = sp->cmd;
 
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 		return;
 	}
 
-	ha = sp->ha;
+	vha = sp->vha;
+	req = sp->que;
 
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 			 * Seven DSDs are available in the Continuation
 			 * Type 0 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+			cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
 			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
 			avail_dsds = 7;
 		}
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 {
 	uint16_t avail_dsds;
 	uint32_t *cur_dsd;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct req_que *req;
 
 	cmd = sp->cmd;
 
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 		return;
 	}
 
-	ha = sp->ha;
+	vha = sp->vha;
+	req = sp->que;
 
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 			 * Five DSDs are available in the Continuation
 			 * Type 1 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 		}
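
The three build_scsi_iocbs variants share the scatter/gather walk whose continuation branch is being repointed in the hunks above. A fragment-level sketch of that loop (locals elided, based on the surrounding code; 32-bit addressing would use the Type 0 helper with avail_dsds = 7 and two words per DSD instead):

	/* Sketch of the shared S/G walk: spill into a Continuation
	 * Type 1 IOCB from the same queue whenever the DSDs left in
	 * the current packet are exhausted. */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		if (avail_dsds == 0) {
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;	/* five 64-bit DSDs per Type 1 */
		}
		*cur_dsd++ = cpu_to_le32(LSD(sg_dma_address(sg)));
		*cur_dsd++ = cpu_to_le32(MSD(sg_dma_address(sg)));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}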
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
 {
 	int ret, nseg;
 	unsigned long flags;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *vha;
 	struct scsi_cmnd *cmd;
 	uint32_t *clr_ptr;
 	uint32_t index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
 	uint16_t req_cnt;
 	uint16_t tot_dsds;
 	struct device_reg_2xxx __iomem *reg;
+	struct qla_hw_data *ha;
+	struct req_que *req;
+	struct rsp_que *rsp;
 
 	/* Setup device pointers. */
 	ret = 0;
-	ha = sp->ha;
+	vha = sp->vha;
+	ha = vha->hw;
 	reg = &ha->iobase->isp;
 	cmd = sp->cmd;
+	req = ha->req_q_map[0];
+	rsp = ha->rsp_q_map[0];
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
 	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+							!= QLA_SUCCESS)
 			return (QLA_FUNCTION_FAILED);
-		}
-		ha->marker_needed = 0;
+		vha->marker_needed = 0;
 	}
 
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Check for room in outstanding command list. */
-	handle = ha->current_outstanding_cmd;
+	handle = req->current_outstanding_cmd;
 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
 		handle++;
 		if (handle == MAX_OUTSTANDING_COMMANDS)
 			handle = 1;
-		if (!ha->outstanding_cmds[handle])
+		if (!req->outstanding_cmds[handle])
 			break;
 	}
 	if (index == MAX_OUTSTANDING_COMMANDS)
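
The free-handle scan above is unchanged by the patch except for its home (per queue instead of per host). As a free-standing sketch with a hypothetical name; note the wrap deliberately skips slot 0, which stays reserved as an invalid handle:

/* Sketch: find a free per-queue command handle, resuming after the
 * last one issued; returns 0 when the queue is saturated. */
static uint32_t req_que_find_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;	/* caller bails out to queuing_error */
}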
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
 
 	/* Calculate the number of request entries needed. */
 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
-	if (ha->req_q_cnt < (req_cnt + 2)) {
+	if (req->cnt < (req_cnt + 2)) {
 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
-		if (ha->req_ring_index < cnt)
-			ha->req_q_cnt = cnt - ha->req_ring_index;
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
 		else
-			ha->req_q_cnt = ha->request_q_length -
-			    (ha->req_ring_index - cnt);
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
 	}
-	if (ha->req_q_cnt < (req_cnt + 2))
+	if (req->cnt < (req_cnt + 2))
 		goto queuing_error;
 
 	/* Build command packet */
-	ha->current_outstanding_cmd = handle;
-	ha->outstanding_cmds[handle] = sp;
-	sp->ha = ha;
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->vha = vha;
+	sp->que = req;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-	ha->req_q_cnt -= req_cnt;
+	req->cnt -= req_cnt;
 
-	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
+	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
 	cmd_pkt->handle = handle;
 	/* Zero out remaining portion of packet. */
 	clr_ptr = (uint32_t *)cmd_pkt + 2;
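
The recount above is standard circular-buffer arithmetic: with producer index in = req->ring_index and consumer index out read back from the chip, free space is out - in when in < out, otherwise length - (in - out). As a hedged helper sketch:

/* Sketch of the free-entry computation both start_scsi routines use;
 * 'out' is the hardware out-pointer just read from the chip. */
static uint16_t req_que_room(struct req_que *req, uint16_t out)
{
	if (req->ring_index < out)
		return out - req->ring_index;
	return req->length - (req->ring_index - out);
}

The caller still demands req_cnt + 2 spare entries before queuing; that slack keeps the producer from catching the consumer, a state this arithmetic cannot tell apart from an empty ring.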
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
 	wmb();
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
+	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
 
 	/* Manage unprocessed RIO/ZIO commands in response queue. */
-	if (ha->flags.process_response_queue &&
-	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
-		qla2x00_process_response_queue(ha);
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla2x00_process_response_queue(rsp);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return (QLA_SUCCESS);
@@ -415,18 +426,20 @@ queuing_error:
  * Returns non-zero if a failure occurred, else zero.
  */
 int
-__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
-    uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+			struct rsp_que *rsp, uint16_t loop_id,
+			uint16_t lun, uint8_t type)
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	mrk24 = NULL;
-	mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
+	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
 	if (mrk == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
-		    __func__, ha->host_no));
+		    __func__, base_vha->host_no));
 
 		return (QLA_FUNCTION_FAILED);
 	}
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 		mrk24->lun[1] = LSB(lun);
 		mrk24->lun[2] = MSB(lun);
 		host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
-		mrk24->vp_index = ha->vp_idx;
+		mrk24->vp_index = vha->vp_idx;
 	} else {
 		SET_TARGET_ID(ha, mrk->target, loop_id);
 		mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 	}
 	wmb();
 
-	qla2x00_isp_cmd(pha);
+	qla2x00_isp_cmd(vha, req);
 
 	return (QLA_SUCCESS);
 }
 
 int
-qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
-    uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+		uint8_t type)
 {
 	int ret;
 	unsigned long flags = 0;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
-	spin_lock_irqsave(&pha->hardware_lock, flags);
-	ret = __qla2x00_marker(ha, loop_id, lun, type);
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 	return (ret);
 }
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
 static request_t *
-qla2x00_req_pkt(scsi_qla_host_t *ha)
+qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
+		struct rsp_que *rsp)
 {
-	device_reg_t __iomem *reg = ha->iobase;
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
 	request_t *pkt = NULL;
 	uint16_t cnt;
 	uint32_t *dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 
 	/* Wait 1 second for slot. */
 	for (timer = HZ; timer; timer--) {
-		if ((req_cnt + 2) >= ha->req_q_cnt) {
+		if ((req_cnt + 2) >= req->cnt) {
 			/* Calculate number of free request entries. */
-			if (IS_FWI2_CAPABLE(ha))
-				cnt = (uint16_t)RD_REG_DWORD(
-				    &reg->isp24.req_q_out);
-			else
-				cnt = qla2x00_debounce_register(
-				    ISP_REQ_Q_OUT(ha, &reg->isp));
-			if (ha->req_ring_index < cnt)
-				ha->req_q_cnt = cnt - ha->req_ring_index;
+			if (ha->mqenable)
+				cnt = (uint16_t)
+					RD_REG_DWORD(&reg->isp25mq.req_q_out);
+			else {
+				if (IS_FWI2_CAPABLE(ha))
+					cnt = (uint16_t)RD_REG_DWORD(
+						&reg->isp24.req_q_out);
+				else
+					cnt = qla2x00_debounce_register(
+						ISP_REQ_Q_OUT(ha, &reg->isp));
+			}
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
 			else
-				ha->req_q_cnt = ha->request_q_length -
-				    (ha->req_ring_index - cnt);
+				req->cnt = req->length -
+					(req->ring_index - cnt);
 		}
 		/* If room for request in request ring. */
-		if ((req_cnt + 2) < ha->req_q_cnt) {
-			ha->req_q_cnt--;
-			pkt = ha->request_ring_ptr;
+		if ((req_cnt + 2) < req->cnt) {
+			req->cnt--;
+			pkt = req->ring_ptr;
 
 			/* Zero out packet. */
 			dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 				*dword_ptr++ = 0;
 
 			/* Set system defined field. */
-			pkt->sys_define = (uint8_t)ha->req_ring_index;
+			pkt->sys_define = (uint8_t)req->ring_index;
 
 			/* Set entry count. */
 			pkt->entry_count = 1;
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 		}
 
 		/* Release ring specific lock */
-		spin_unlock(&ha->hardware_lock);
+		spin_unlock_irq(&ha->hardware_lock);
 
 		udelay(2);   /* 2 us */
 
 		/* Check for pending interrupts. */
 		/* During init we issue marker directly */
-		if (!ha->marker_needed && !ha->flags.init_done)
-			qla2x00_poll(ha);
-
+		if (!vha->marker_needed && !vha->flags.init_done)
+			qla2x00_poll(rsp);
 		spin_lock_irq(&ha->hardware_lock);
 	}
 	if (!pkt) {
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 * Note: The caller must hold the hardware lock before calling this routine.
 */
 static void
-qla2x00_isp_cmd(scsi_qla_host_t *ha)
+qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
 {
-	device_reg_t __iomem *reg = ha->iobase;
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
 
 	DEBUG5(printk("%s(): IOCB data:\n", __func__));
 	DEBUG5(qla2x00_dump_buffer(
-	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
+		(uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 
 	/* Set chip new ring index. */
-	if (IS_FWI2_CAPABLE(ha)) {
-		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
-		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-	} else {
-		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
-		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+	if (ha->mqenable) {
+		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+		RD_REG_DWORD(&ioreg->hccr);
+	}
+	else {
+		if (IS_FWI2_CAPABLE(ha)) {
+			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+		} else {
+			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+					req->ring_index);
+			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+		}
 	}
 
 }
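
qla2x00_req_pkt() and qla2x00_isp_cmd() now resolve their register block through ISP_QUE_REG(ha, req->id) and branch on ha->mqenable for the doorbell. A hypothetical reduction of what that macro is assumed to do (the real one lives in qla_def.h and is not shown in this diff):

/* Assumed behaviour of ISP_QUE_REG(): without MQ, fall back to the
 * legacy BAR mapping; with MQ, queue register pages sit QLA_QUE_PAGE
 * bytes apart in the multiqueue BAR.  Sketch only. */
static inline device_reg_t __iomem *
isp_que_reg_sketch(struct qla_hw_data *ha, uint16_t id)
{
	if (ha->mqenable)
		return (device_reg_t __iomem *)
		    ((void __iomem *)ha->mqiobase + QLA_QUE_PAGE * id);
	return ha->iobase;
}

The page math matches the qla25xx_*_req_reg helpers added at the end of this diff, which index ha->mqiobase the same way.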
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 {
 	uint16_t avail_dsds;
 	uint32_t *cur_dsd;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct req_que *req;
 
 	cmd = sp->cmd;
 
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 		return;
 	}
 
-	ha = sp->ha;
+	vha = sp->vha;
+	req = sp->que;
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
-		sp->fcport->ha->qla_stats.output_bytes +=
+		sp->fcport->vha->hw->qla_stats.output_bytes +=
 		    scsi_bufflen(sp->cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_READ_DATA);
-		sp->fcport->ha->qla_stats.input_bytes +=
+		sp->fcport->vha->hw->qla_stats.input_bytes +=
 		    scsi_bufflen(sp->cmd);
 	}
 
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 			 * Five DSDs are available in the Continuation
 			 * Type 1 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 		}
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
 {
 	int ret, nseg;
 	unsigned long flags;
-	scsi_qla_host_t *ha, *pha;
-	struct scsi_cmnd *cmd;
 	uint32_t *clr_ptr;
 	uint32_t index;
 	uint32_t handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
 	uint16_t cnt;
 	uint16_t req_cnt;
 	uint16_t tot_dsds;
-	struct device_reg_24xx __iomem *reg;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t que_id;
 
 	/* Setup device pointers. */
 	ret = 0;
-	ha = sp->ha;
-	pha = to_qla_parent(ha);
-	reg = &ha->iobase->isp24;
-	cmd = sp->cmd;
+	que_id = vha->req_ques[0];
+
+	req = ha->req_q_map[que_id];
+	sp->que = req;
+
+	if (req->rsp)
+		rsp = req->rsp;
+	else
+		rsp = ha->rsp_q_map[que_id];
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
 	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+							!= QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
-		}
-		ha->marker_needed = 0;
+		vha->marker_needed = 0;
 	}
 
 	/* Acquire ring specific lock */
-	spin_lock_irqsave(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Check for room in outstanding command list. */
-	handle = ha->current_outstanding_cmd;
+	handle = req->current_outstanding_cmd;
 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
 		handle++;
 		if (handle == MAX_OUTSTANDING_COMMANDS)
 			handle = 1;
-		if (!ha->outstanding_cmds[handle])
+		if (!req->outstanding_cmds[handle])
 			break;
 	}
 	if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
 	tot_dsds = nseg;
 
 	req_cnt = qla24xx_calc_iocbs(tot_dsds);
-	if (ha->req_q_cnt < (req_cnt + 2)) {
-		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
-		if (ha->req_ring_index < cnt)
-			ha->req_q_cnt = cnt - ha->req_ring_index;
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = ha->isp_ops->rd_req_reg(ha, req->id);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
 		else
-			ha->req_q_cnt = ha->request_q_length -
-			    (ha->req_ring_index - cnt);
+			req->cnt = req->length -
+				(req->ring_index - cnt);
 	}
-	if (ha->req_q_cnt < (req_cnt + 2))
+	if (req->cnt < (req_cnt + 2))
 		goto queuing_error;
 
 	/* Build command packet. */
-	ha->current_outstanding_cmd = handle;
-	ha->outstanding_cmds[handle] = sp;
-	sp->ha = ha;
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->vha = vha;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-	ha->req_q_cnt -= req_cnt;
+	req->cnt -= req_cnt;
 
-	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
+	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
 	cmd_pkt->handle = handle;
 
 	/* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
 	wmb();
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
-	RD_REG_DWORD_RELAXED(&reg->req_q_in);	/* PCI Posting. */
+	ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
 
 	/* Manage unprocessed RIO/ZIO commands in response queue. */
-	if (ha->flags.process_response_queue &&
-	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
-		qla24xx_process_response_queue(ha);
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(rsp);
 
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return QLA_SUCCESS;
 
 queuing_error:
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_FUNCTION_FAILED;
 }
+
+uint16_t
+qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+	device_reg_t __iomem *reg = (void *) ha->iobase;
+	return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
+}
+
+uint16_t
+qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+	return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
+}
+
+void
+qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+	device_reg_t __iomem *reg = (void *) ha->iobase;
+	WRT_REG_DWORD(&reg->isp24.req_q_in, index);
+	RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+}
+
+void
+qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+	WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
+	RD_REG_DWORD(&ioreg->hccr);	/* PCI posting */
+}
+
890