author     Anirban Chakraborty <anirban.chakraborty@qlogic.com>      2008-12-09 19:45:39 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2008-12-29 12:24:33 -0500
commit     73208dfd7ab19f379d73e8a0fbf30f92c203e5e8
tree       f69be5e89817d17b066ece4dbe04e395339c0754  /drivers/scsi/qla2xxx/qla_iocb.c
parent     85b4aa4926a50210b683ac89326e338e7d131211
[SCSI] qla2xxx: add support for multi-queue adapter
The following changes have been made:
1. The qla_hw_data structure now holds an array of request queue pointers
and an array of response queue pointers.
2. The base request and response queues are created by default.
3. Additional request and response queues are created at the time of vport
creation. If queue resources are exhausted during vport creation, newly
created vports use the default queue.
4. Requests are sent to the request queue that was assigned to the vport
at creation time.
5. Responses are completed on the response queue associated with that
request queue (see the sketch below).
[fixup memcpy argument reversal spotted by davej@redhat.com]
Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
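
To make the queue bookkeeping above concrete, here is a minimal, standalone C
sketch of the scheme. The struct layouts, the QLA_MAX_QUEUES bound, and the
helper names vport_req_que()/vport_rsp_que() are illustrative assumptions, not
the driver's actual definitions; only the req_q_map[]/rsp_q_map[] arrays, the
per-vport req_ques[] index, and the req->rsp pairing mirror what the patch
below introduces.

#include <stdint.h>

#define QLA_MAX_QUEUES 16          /* illustrative bound, not the driver's value */

struct rsp_que {
    uint16_t id;
};

struct req_que {
    uint16_t id;
    struct rsp_que *rsp;           /* response queue paired with this request queue */
};

struct qla_hw_data {
    /* Slot 0 holds the base (default) request/response queues. */
    struct req_que *req_q_map[QLA_MAX_QUEUES];
    struct rsp_que *rsp_q_map[QLA_MAX_QUEUES];
};

struct scsi_qla_host {
    struct qla_hw_data *hw;
    uint16_t req_ques[4];          /* queue indices assigned to this vport; 0 = default */
};

/* Request queue a command issued through this vport is posted to. */
struct req_que *vport_req_que(struct scsi_qla_host *vha)
{
    uint16_t que_id = vha->req_ques[0];

    return vha->hw->req_q_map[que_id];
}

/* Responses complete on the response queue tied to that request queue. */
struct rsp_que *vport_rsp_que(struct scsi_qla_host *vha)
{
    struct req_que *req = vport_req_que(vha);

    return req->rsp ? req->rsp : vha->hw->rsp_q_map[vha->req_ques[0]];
}

In the patch below, qla24xx_start_scsi() performs exactly this lookup: it reads
vha->req_ques[0], indexes ha->req_q_map[], and falls back to ha->rsp_q_map[que_id]
when the request queue has no paired response queue; qla2x00_start_scsi() simply
uses the base queues at index 0.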
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 158
1 file changed, 100 insertions(+), 58 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 0c145c9e0cd9..6d2bd97c3b11 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@

 #include <scsi/scsi_tcq.h>

-static request_t *qla2x00_req_pkt(scsi_qla_host_t *);
-static void qla2x00_isp_cmd(scsi_qla_host_t *);
+static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
+        struct rsp_que *rsp);
+static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -91,10 +92,9 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
  * Returns a pointer to the Continuation Type 0 IOCB packet.
  */
 static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
 {
     cont_entry_t *cont_pkt;
-    struct req_que *req = vha->hw->req;
     /* Adjust ring index. */
     req->ring_index++;
     if (req->ring_index == req->length) {
@@ -120,10 +120,9 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
 {
     cont_a64_entry_t *cont_pkt;
-    struct req_que *req = vha->hw->req;

     /* Adjust ring index. */
     req->ring_index++;
@@ -160,6 +159,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
     struct scsi_cmnd *cmd;
     struct scatterlist *sg;
     int i;
+    struct req_que *req;
+    uint16_t que_id;

     cmd = sp->cmd;

@@ -174,6 +175,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
     }

     vha = sp->vha;
+    que_id = vha->req_ques[0];
+    req = vha->hw->req_q_map[que_id];

     cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

@@ -191,7 +194,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
             * Seven DSDs are available in the Continuation
             * Type 0 IOCB.
             */
-            cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
+            cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
             cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
             avail_dsds = 7;
         }
@@ -219,6 +222,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
     struct scsi_cmnd *cmd;
     struct scatterlist *sg;
     int i;
+    struct req_que *req;
+    uint16_t que_id;

     cmd = sp->cmd;

@@ -233,6 +238,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
     }

     vha = sp->vha;
+    que_id = vha->req_ques[0];
+    req = vha->hw->req_q_map[que_id];

     cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

@@ -251,7 +258,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
-            cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+            cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
             cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
             avail_dsds = 5;
         }
@@ -287,6 +294,7 @@ qla2x00_start_scsi(srb_t *sp)
     struct device_reg_2xxx __iomem *reg;
     struct qla_hw_data *ha;
     struct req_que *req;
+    struct rsp_que *rsp;

     /* Setup device pointers. */
     ret = 0;
@@ -294,13 +302,15 @@ qla2x00_start_scsi(srb_t *sp)
     ha = vha->hw;
     reg = &ha->iobase->isp;
     cmd = sp->cmd;
-    req = ha->req;
+    req = ha->req_q_map[0];
+    rsp = ha->rsp_q_map[0];
     /* So we know we haven't pci_map'ed anything yet */
     tot_dsds = 0;

     /* Send marker if required */
     if (vha->marker_needed != 0) {
-        if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+                != QLA_SUCCESS)
             return (QLA_FUNCTION_FAILED);
         vha->marker_needed = 0;
     }
@@ -392,8 +402,8 @@ qla2x00_start_scsi(srb_t *sp)

     /* Manage unprocessed RIO/ZIO commands in response queue. */
     if (vha->flags.process_response_queue &&
-        ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-        qla2x00_process_response_queue(vha);
+        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+        qla2x00_process_response_queue(rsp);

     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     return (QLA_SUCCESS);
@@ -419,8 +429,9 @@ queuing_error:
  * Returns non-zero if a failure occurred, else zero.
  */
 int
-__qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
-    uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+        struct rsp_que *rsp, uint16_t loop_id,
+        uint16_t lun, uint8_t type)
 {
     mrk_entry_t *mrk;
     struct mrk_entry_24xx *mrk24;
@@ -428,7 +439,7 @@ __qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

     mrk24 = NULL;
-    mrk = (mrk_entry_t *)qla2x00_req_pkt(base_vha);
+    mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
     if (mrk == NULL) {
         DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
             __func__, base_vha->host_no));
@@ -453,22 +464,22 @@ __qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
     }
     wmb();

-    qla2x00_isp_cmd(base_vha);
+    qla2x00_isp_cmd(vha, req);

     return (QLA_SUCCESS);
 }

 int
-qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
-    uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+        struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+        uint8_t type)
 {
     int ret;
     unsigned long flags = 0;
-    struct qla_hw_data *ha = vha->hw;

-    spin_lock_irqsave(&ha->hardware_lock, flags);
-    ret = __qla2x00_marker(vha, loop_id, lun, type);
-    spin_unlock_irqrestore(&ha->hardware_lock, flags);
+    spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+    ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+    spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

     return (ret);
 }
@@ -482,27 +493,32 @@ qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
  * Returns NULL if function failed, else, a pointer to the request packet.
  */
 static request_t *
-qla2x00_req_pkt(scsi_qla_host_t *vha)
+qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
+        struct rsp_que *rsp)
 {
     struct qla_hw_data *ha = vha->hw;
-    device_reg_t __iomem *reg = ha->iobase;
+    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
     request_t *pkt = NULL;
     uint16_t cnt;
     uint32_t *dword_ptr;
     uint32_t timer;
     uint16_t req_cnt = 1;
-    struct req_que *req = ha->req;

     /* Wait 1 second for slot. */
     for (timer = HZ; timer; timer--) {
         if ((req_cnt + 2) >= req->cnt) {
             /* Calculate number of free request entries. */
-            if (IS_FWI2_CAPABLE(ha))
-                cnt = (uint16_t)RD_REG_DWORD(
-                    &reg->isp24.req_q_out);
-            else
-                cnt = qla2x00_debounce_register(
-                    ISP_REQ_Q_OUT(ha, &reg->isp));
+            if (ha->mqenable)
+                cnt = (uint16_t)
+                    RD_REG_DWORD(&reg->isp25mq.req_q_out);
+            else {
+                if (IS_FWI2_CAPABLE(ha))
+                    cnt = (uint16_t)RD_REG_DWORD(
+                        &reg->isp24.req_q_out);
+                else
+                    cnt = qla2x00_debounce_register(
+                        ISP_REQ_Q_OUT(ha, &reg->isp));
+            }
             if (req->ring_index < cnt)
                 req->cnt = cnt - req->ring_index;
             else
@@ -536,7 +552,7 @@ qla2x00_req_pkt(scsi_qla_host_t *vha)
         /* Check for pending interrupts. */
         /* During init we issue marker directly */
         if (!vha->marker_needed && !vha->flags.init_done)
-            qla2x00_poll(ha->rsp);
+            qla2x00_poll(rsp);
         spin_lock_irq(&ha->hardware_lock);
     }
     if (!pkt) {
@@ -553,11 +569,10 @@ qla2x00_req_pkt(scsi_qla_host_t *vha)
  * Note: The caller must hold the hardware lock before calling this routine.
  */
 static void
-qla2x00_isp_cmd(scsi_qla_host_t *vha)
+qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
 {
     struct qla_hw_data *ha = vha->hw;
-    device_reg_t __iomem *reg = ha->iobase;
-    struct req_que *req = ha->req;
+    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

     DEBUG5(printk("%s(): IOCB data:\n", __func__));
     DEBUG5(qla2x00_dump_buffer(
@@ -572,12 +587,17 @@ qla2x00_isp_cmd(scsi_qla_host_t *vha)
         req->ring_ptr++;

     /* Set chip new ring index. */
-    if (IS_FWI2_CAPABLE(ha)) {
-        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-    } else {
-        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
-        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+    if (ha->mqenable)
+        RD_REG_DWORD(&reg->isp25mq.req_q_out);
+    else {
+        if (IS_FWI2_CAPABLE(ha)) {
+            WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+            RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+        } else {
+            WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+                req->ring_index);
+            RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+        }
     }

 }
@@ -622,6 +642,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     struct scsi_cmnd *cmd;
     struct scatterlist *sg;
     int i;
+    uint16_t que_id;
+    struct req_que *req;

     cmd = sp->cmd;

@@ -636,6 +658,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     }

     vha = sp->vha;
+    que_id = vha->req_ques[0];
+    req = vha->hw->req_q_map[que_id];

     /* Set transfer direction */
     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -666,7 +690,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
-            cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+            cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
             cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
             avail_dsds = 5;
         }
@@ -691,8 +715,6 @@ qla24xx_start_scsi(srb_t *sp)
 {
     int ret, nseg;
     unsigned long flags;
-    scsi_qla_host_t *vha;
-    struct scsi_cmnd *cmd;
     uint32_t *clr_ptr;
     uint32_t index;
     uint32_t handle;
@@ -700,23 +722,32 @@ qla24xx_start_scsi(srb_t *sp)
     uint16_t cnt;
     uint16_t req_cnt;
     uint16_t tot_dsds;
-    struct device_reg_24xx __iomem *reg;
-    struct qla_hw_data *ha;
-    struct req_que *req;
+    struct req_que *req = NULL;
+    struct rsp_que *rsp = NULL;
+    struct scsi_cmnd *cmd = sp->cmd;
+    struct scsi_qla_host *vha = sp->vha;
+    struct qla_hw_data *ha = vha->hw;
+    device_reg_t __iomem *reg;
+    uint16_t que_id;

     /* Setup device pointers. */
     ret = 0;
-    vha = sp->vha;
-    ha = vha->hw;
-    reg = &ha->iobase->isp24;
-    cmd = sp->cmd;
-    req = ha->req;
+    que_id = vha->req_ques[0];
+
+    req = ha->req_q_map[que_id];
+    reg = ISP_QUE_REG(ha, req->id);
+
+    if (req->rsp)
+        rsp = req->rsp;
+    else
+        rsp = ha->rsp_q_map[que_id];
     /* So we know we haven't pci_map'ed anything yet */
     tot_dsds = 0;

     /* Send marker if required */
     if (vha->marker_needed != 0) {
-        if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+                != QLA_SUCCESS)
             return QLA_FUNCTION_FAILED;
         vha->marker_needed = 0;
     }
@@ -749,7 +780,13 @@ qla24xx_start_scsi(srb_t *sp)

     req_cnt = qla24xx_calc_iocbs(tot_dsds);
     if (req->cnt < (req_cnt + 2)) {
-        cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
+        if (ha->mqenable)
+            cnt = (uint16_t)
+                RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
+        else
+            cnt = (uint16_t)
+                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
+
         if (req->ring_index < cnt)
             req->cnt = cnt - req->ring_index;
         else
@@ -809,13 +846,17 @@ qla24xx_start_scsi(srb_t *sp)
     sp->flags |= SRB_DMA_VALID;

     /* Set chip new ring index. */
-    WRT_REG_DWORD(&reg->req_q_in, req->ring_index);
-    RD_REG_DWORD_RELAXED(&reg->req_q_in);      /* PCI Posting. */
+    if (ha->mqenable)
+        WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+    else {
+        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+    }

     /* Manage unprocessed RIO/ZIO commands in response queue. */
     if (vha->flags.process_response_queue &&
-        ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-        qla24xx_process_response_queue(vha);
+        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+        qla24xx_process_response_queue(rsp);

     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     return QLA_SUCCESS;
@@ -828,3 +869,4 @@ queuing_error:

     return QLA_FUNCTION_FAILED;
 }
+
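
Viewed from the call sites, the signature changes above mean a caller can no
longer rely on a single implicit request/response queue pair per HBA; it must
resolve the vport's queues and pass them explicitly. A hedged before/after
fragment of that call-site pattern (the lookup reuses what qla24xx_start_scsi()
does in this patch; the surrounding declarations are assumed and this is not a
verbatim quote of any other caller):

/* Before this patch: one implicit request/response queue pair per HBA. */
if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
    return QLA_FUNCTION_FAILED;

/* After: resolve the vport's queue pair and hand it to the IOCB helpers. */
que_id = vha->req_ques[0];
req = ha->req_q_map[que_id];
rsp = req->rsp ? req->rsp : ha->rsp_q_map[que_id];

if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
    return QLA_FUNCTION_FAILED;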