author		Giridhar Malavali <giridhar.malavali@qlogic.com>	2011-11-18 12:03:18 -0500
committer	James Bottomley <JBottomley@Parallels.com>	2011-12-15 01:55:10 -0500
commit		5162cf0c4e3962b28a9c8bc1ce89d266db67aa55 (patch)
tree		2227318fbacfa974c1aea9d8f4d645f9e659711a /drivers/scsi/qla2xxx
parent		99b8212c491913bd077063c138676ad5af8c6d3d (diff)
[SCSI] qla2xxx: Consolidated IOCB processing routines.
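This change replaces the old qla2x00_isp_cmd() and the srb_t-based
qla2x00_start_iocbs() with a single qla2x00_start_iocbs(vha, req) helper,
changes qla82xx_start_iocbs() to take a scsi_qla_host_t instead of an
srb_t, and moves qla82xx_start_scsi() with its Command Type 6 helpers from
qla_nx.c into qla_iocb.c, so all IOCB submission paths share one
implementation.

The sizing helpers carried over by the patch are small enough to
illustrate standalone. A minimal sketch of their arithmetic follows (not
driver code; the QLA_DSDS_PER_IOCB value below is an assumption for
illustration only):

	#include <stdint.h>

	#define QLA_DSDS_PER_IOCB 37	/* assumed DSDs per Type 6 DSD list */

	/* One Command Type 3 IOCB holds one inline DSD; each Continuation
	 * Type 1 IOCB holds five more (mirrors qla24xx_calc_iocbs below). */
	static uint16_t calc_iocbs(uint16_t dsds)
	{
		uint16_t iocbs = 1;

		if (dsds > 1) {
			iocbs += (dsds - 1) / 5;
			if ((dsds - 1) % 5)
				iocbs++;
		}
		return iocbs;
	}

	/* DSD lists a Command Type 6 needs for dsds segments: a plain
	 * ceiling divide (mirrors qla24xx_calc_dsd_lists below). */
	static uint16_t calc_dsd_lists(uint16_t dsds)
	{
		uint16_t dsd_lists = dsds / QLA_DSDS_PER_IOCB;

		if (dsds % QLA_DSDS_PER_IOCB)
			dsd_lists++;
		return dsd_lists;
	}

For example, 12 data segments need calc_iocbs(12) = 1 + 11/5 + 1 = 4 IOCBs
on the Type 3/7 path, and calc_dsd_lists(12) = 1 DSD list on the Type 6 path.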
Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.c	3
-rw-r--r--	drivers/scsi/qla2xxx/qla_gbl.h	2
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	618
-rw-r--r--	drivers/scsi/qla2xxx/qla_nx.c	482
4 files changed, 522 insertions, 583 deletions
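The ISP82xx submission path in the diff below rings a CRB doorbell rather
than a request-queue index register, and re-reads the doorbell until the
written value sticks. A simplified sketch of that write-and-verify loop
(hypothetical mmio helpers standing in for the driver's WRT_REG_DWORD and
RD_REG_DWORD macros):

	#include <stdint.h>

	static inline void mmio_write32(volatile uint32_t *addr, uint32_t val)
	{
		*addr = val;	/* stands in for WRT_REG_DWORD */
	}

	static inline uint32_t mmio_read32(volatile uint32_t *addr)
	{
		return *addr;	/* stands in for RD_REG_DWORD */
	}

	static inline void write_barrier(void)
	{
		__asm__ __volatile__("" ::: "memory");	/* stands in for wmb() */
	}

	/* Doorbell value layout from the diff: 0x04 | portnum << 5,
	 * request-queue id in bits 8..15, ring index in bits 16..31. */
	static void ring_doorbell(volatile uint32_t *wr, volatile uint32_t *rd,
				  uint8_t portnum, uint8_t qid, uint16_t index)
	{
		uint32_t dbval = 0x04 | (uint32_t)(portnum << 5) |
				 ((uint32_t)qid << 8) | ((uint32_t)index << 16);

		mmio_write32(wr, dbval);
		write_barrier();
		/* Verify: repeat the write until the read-back side agrees. */
		while (mmio_read32(rd) != dbval) {
			mmio_write32(wr, dbval);
			write_barrier();
		}
	}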
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index dc454628f0a1..7c54624b5b13 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,7 +14,8 @@
  * | Module Init and Probe        | 0x0116     | 0xfa           |
  * | Mailbox commands             | 0x112b     |                |
  * | Device Discovery             | 0x2084     |                |
- * | Queue Command and IO tracing | 0x302f     | 0x3008         |
+ * | Queue Command and IO tracing | 0x302f     | 0x3008,0x302d, |
+ * |                              |            | 0x302e         |
  * | DPC Thread                   | 0x401c     |                |
  * | Async Events                 | 0x5057     | 0x5052         |
  * | Timer Routines               | 0x6011     | 0x600e,0x600f  |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c0c11afb685c..408679be8fdf 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -572,7 +572,7 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
 	size_t, char *);
 extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
 extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
-extern void qla82xx_start_iocbs(srb_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
 extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
 extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 64a3075c3b47..d2cc0ccfc202 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,6 @@
 
 #include <scsi/scsi_tcq.h>
 
-static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
-
 static void qla25xx_set_que(srb_t *, struct rsp_que **);
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -468,6 +466,42 @@ queuing_error:
 }
 
 /**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+static void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+	if (IS_QLA82XX(ha)) {
+		qla82xx_start_iocbs(vha);
+	} else {
+		/* Adjust ring index. */
+		req->ring_index++;
+		if (req->ring_index == req->length) {
+			req->ring_index = 0;
+			req->ring_ptr = req->ring;
+		} else
+			req->ring_ptr++;
+
+		/* Set chip new ring index. */
+		if (ha->mqenable) {
+			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+			RD_REG_DWORD(&ioreg->hccr);
+		} else if (IS_FWI2_CAPABLE(ha)) {
+			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+		} else {
+			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+				req->ring_index);
+			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+		}
+	}
+}
+
+/**
  * qla2x00_marker() - Send a marker IOCB to the firmware.
  * @ha: HA context
  * @loop_id: loop ID
@@ -516,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 	}
 	wmb();
 
-	qla2x00_isp_cmd(vha, req);
+	qla2x00_start_iocbs(vha, req);
 
 	return (QLA_SUCCESS);
 }
@@ -537,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 }
 
 /**
- * qla2x00_isp_cmd() - Modify the request ring pointer.
- * @ha: HA context
- *
- * Note: The caller must hold the hardware lock before calling this routine.
- */
-static void
-qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
-{
-	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
-	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
-	    "IOCB data:\n");
-	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
-	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
-
-	/* Adjust ring index. */
-	req->ring_index++;
-	if (req->ring_index == req->length) {
-		req->ring_index = 0;
-		req->ring_ptr = req->ring;
-	} else
-		req->ring_ptr++;
-
-	/* Set chip new ring index. */
-	if (IS_QLA82XX(ha)) {
-		uint32_t dbval = 0x04 | (ha->portnum << 5);
-
-		/* write, read and verify logic */
-		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-		if (ql2xdbwr)
-			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-		else {
-			WRT_REG_DWORD(
-				(unsigned long __iomem *)ha->nxdb_wr_ptr,
-				dbval);
-			wmb();
-			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-				WRT_REG_DWORD((unsigned long __iomem *)
-					ha->nxdb_wr_ptr, dbval);
-				wmb();
-			}
-		}
-	} else if (ha->mqenable) {
-		/* Set chip new ring index. */
-		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-		RD_REG_DWORD(&ioreg->hccr);
-	} else {
-		if (IS_FWI2_CAPABLE(ha)) {
-			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-		} else {
-			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
-			    req->ring_index);
-			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
-		}
-	}
-
-}
-
-/**
  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  *  Continuation Type 1 IOCBs to allocate.
  *
  * @dsds: number of data segment decriptors needed
  *
  * Returns the number of IOCB entries needed to store @dsds.
  */
 inline uint16_t
 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
 {
 	uint16_t iocbs;
 
 	iocbs = 1;
 	if (dsds > 1) {
 		iocbs += (dsds - 1) / 5;
 		if ((dsds - 1) % 5)
 			iocbs++;
 	}
 	return iocbs;
 }
 
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+	uint16_t tot_dsds)
+{
+	uint32_t *cur_dsd = NULL;
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *cur_seg;
+	uint32_t *dsd_seg;
+	void *next_dsd;
+	uint8_t avail_dsds;
+	uint8_t first_iocb = 1;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct ct6_dsd *ctx;
+
+	cmd = sp->cmd;
+
+	/* Update entry type to indicate Command Type 3 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) =
+		__constant_cpu_to_le32(COMMAND_TYPE_6);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		return 0;
+	}
+
+	vha = sp->fcport->vha;
+	ha = vha->hw;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->control_flags =
+			__constant_cpu_to_le16(CF_WRITE_DATA);
+		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->control_flags =
+			__constant_cpu_to_le16(CF_READ_DATA);
+		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+	}
+
+	cur_seg = scsi_sglist(cmd);
+	ctx = sp->ctx;
+
+	while (tot_dsds) {
+		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+		    QLA_DSDS_PER_IOCB : tot_dsds;
+		tot_dsds -= avail_dsds;
+		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+		    struct dsd_dma, list);
+		next_dsd = dsd_ptr->dsd_addr;
+		list_del(&dsd_ptr->list);
+		ha->gbl_dsd_avail--;
+		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+		ctx->dsd_use_cnt++;
+		ha->gbl_dsd_inuse++;
+
+		if (first_iocb) {
+			first_iocb = 0;
+			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+		} else {
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(dsd_list_len);
+		}
+		cur_dsd = (uint32_t *)next_dsd;
+		while (avail_dsds) {
+			dma_addr_t sle_dma;
+
+			sle_dma = sg_dma_address(cur_seg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+			cur_seg = sg_next(cur_seg);
+			avail_dsds--;
+		}
+	}
+
+	/* Null termination */
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+	return 0;
+}
+
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD list required
+ * for Command Type 6.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of dsd list needed to store @dsds.
+ */
+inline uint16_t
+qla24xx_calc_dsd_lists(uint16_t dsds)
+{
+	uint16_t dsd_lists = 0;
+
+	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+	if (dsds % QLA_DSDS_PER_IOCB)
+		dsd_lists++;
+	return dsd_lists;
+}
+
+
 /**
  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
  *  IOCB types.
@@ -946,6 +1031,7 @@ alloc_and_fill:
 	*cur_dsd++ = 0;
 	return 0;
 }
+
 static int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 	uint16_t tot_dsds)
@@ -1794,42 +1880,6 @@ queuing_error:
 }
 
 static void
-qla2x00_start_iocbs(srb_t *sp)
-{
-	struct qla_hw_data *ha = sp->fcport->vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
-	if (IS_QLA82XX(ha)) {
-		qla82xx_start_iocbs(sp);
-	} else {
-		/* Adjust ring index. */
-		req->ring_index++;
-		if (req->ring_index == req->length) {
-			req->ring_index = 0;
-			req->ring_ptr = req->ring;
-		} else
-			req->ring_ptr++;
-
-		/* Set chip new ring index. */
-		if (ha->mqenable) {
-			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-			RD_REG_DWORD(&ioreg->hccr);
-		} else if (IS_QLA82XX(ha)) {
-			qla82xx_start_iocbs(sp);
-		} else if (IS_FWI2_CAPABLE(ha)) {
-			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-		} else {
-			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
-				req->ring_index);
-			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
-		}
-	}
-}
-
-static void
 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
 	struct srb_ctx *ctx = sp->ctx;
@@ -2161,6 +2211,372 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	ct_iocb->entry_count = entry_count;
 }
 
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+	int ret, nseg;
+	unsigned long flags;
+	struct scsi_cmnd *cmd;
+	uint32_t *clr_ptr;
+	uint32_t index;
+	uint32_t handle;
+	uint16_t cnt;
+	uint16_t req_cnt;
+	uint16_t tot_dsds;
+	struct device_reg_82xx __iomem *reg;
+	uint32_t dbval;
+	uint32_t *fcp_dl;
+	uint8_t additional_cdb_len;
+	struct ct6_dsd *ctx;
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	char tag[2];
+
+	/* Setup device pointers. */
+	ret = 0;
+	reg = &ha->iobase->isp82;
+	cmd = sp->cmd;
+	req = vha->req;
+	rsp = ha->rsp_q_map[0];
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	dbval = 0x04 | (ha->portnum << 5);
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req,
+			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x300c,
+			    "qla2x00_marker failed for cmd=%p.\n", cmd);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+		handle++;
+		if (handle == MAX_OUTSTANDING_COMMANDS)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == MAX_OUTSTANDING_COMMANDS)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+
+	if (tot_dsds > ql2xshiftctondsd) {
+		struct cmd_type_6 *cmd_pkt;
+		uint16_t more_dsd_lists = 0;
+		struct dsd_dma *dsd_ptr;
+		uint16_t i;
+
+		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+			ql_dbg(ql_dbg_io, vha, 0x300d,
+			    "Num of DSD list %d is than %d for cmd=%p.\n",
+			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+			    cmd);
+			goto queuing_error;
+		}
+
+		if (more_dsd_lists <= ha->gbl_dsd_avail)
+			goto sufficient_dsds;
+		else
+			more_dsd_lists -= ha->gbl_dsd_avail;
+
+		for (i = 0; i < more_dsd_lists; i++) {
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr) {
+				ql_log(ql_log_fatal, vha, 0x300e,
+				    "Failed to allocate memory for dsd_dma "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+
+			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+			if (!dsd_ptr->dsd_addr) {
+				kfree(dsd_ptr);
+				ql_log(ql_log_fatal, vha, 0x300f,
+				    "Failed to allocate memory for dsd_addr "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+			ha->gbl_dsd_avail++;
+		}
+
+sufficient_dsds:
+		req_cnt = 1;
+
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+				&reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+		}
+
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
+		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+		if (!sp->ctx) {
+			ql_log(ql_log_fatal, vha, 0x3010,
+			    "Failed to allocate ctx for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+		memset(ctx, 0, sizeof(struct ct6_dsd));
+		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+		if (!ctx->fcp_cmnd) {
+			ql_log(ql_log_fatal, vha, 0x3011,
+			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+			goto queuing_error_fcp_cmnd;
+		}
+
+		/* Initialize the DSD list and dma handle */
+		INIT_LIST_HEAD(&ctx->dsd_list);
+		ctx->dsd_use_cnt = 0;
+
+		if (cmd->cmd_len > 16) {
+			additional_cdb_len = cmd->cmd_len - 16;
+			if ((cmd->cmd_len % 4) != 0) {
+				/* SCSI command bigger than 16 bytes must be
+				 * multiple of 4
+				 */
+				ql_log(ql_log_warn, vha, 0x3012,
+				    "scsi cmd len %d not multiple of 4 "
+				    "for cmd=%p.\n", cmd->cmd_len, cmd);
+				goto queuing_error_fcp_cmnd;
+			}
+			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+		} else {
+			additional_cdb_len = 0;
+			ctx->fcp_cmnd_len = 12 + 16 + 4;
+		}
+
+		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+		/* Build IOCB segments */
+		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+			goto queuing_error_fcp_cmnd;
+
+		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+		/* build FCP_CMND IU */
+		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 1;
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+		/*
+		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+		 */
+		if (scsi_populate_tag_msg(cmd, tag)) {
+			switch (tag[0]) {
+			case HEAD_OF_QUEUE_TAG:
+				ctx->fcp_cmnd->task_attribute =
+				    TSK_HEAD_OF_QUEUE;
+				break;
+			case ORDERED_QUEUE_TAG:
+				ctx->fcp_cmnd->task_attribute =
+				    TSK_ORDERED;
+				break;
+			}
+		}
+
+		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+		    additional_cdb_len);
+		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+		cmd_pkt->fcp_cmnd_dseg_address[0] =
+		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+		cmd_pkt->fcp_cmnd_dseg_address[1] =
+		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+		sp->flags |= SRB_FCP_CMND_DMA_VALID;
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+	} else {
+		struct cmd_type_7 *cmd_pkt;
+		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+			    &reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+		}
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
+		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+		    sizeof(cmd_pkt->lun));
+
+		/*
+		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+		 */
+		if (scsi_populate_tag_msg(cmd, tag)) {
+			switch (tag[0]) {
+			case HEAD_OF_QUEUE_TAG:
+				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+				break;
+			case ORDERED_QUEUE_TAG:
+				cmd_pkt->task = TSK_ORDERED;
+				break;
+			}
+		}
+
+		/* Load SCSI command packet. */
+		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+		/* Build IOCB segments */
+		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen.
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+	}
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	/* write, read and verify logic */
+	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+	if (ql2xdbwr)
+		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+	else {
+		WRT_REG_DWORD(
+			(unsigned long __iomem *)ha->nxdb_wr_ptr,
+			dbval);
+		wmb();
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(
+				(unsigned long __iomem *)ha->nxdb_wr_ptr,
+				dbval);
+			wmb();
+		}
+	}
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	if (sp->ctx) {
+		mempool_free(sp->ctx, ha->ctx_mempool);
+		sp->ctx = NULL;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2213,7 +2629,7 @@ qla2x00_start_sp(srb_t *sp)
 	}
 
 	wmb();
-	qla2x00_start_iocbs(sp);
+	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
 done:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return rval;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 805b4b734754..24447ddb4b66 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2539,484 +2539,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
 	return qla82xx_check_rcvpeg_state(ha);
 }
 
-static inline int
-qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
-	uint16_t tot_dsds)
-{
-	uint32_t *cur_dsd = NULL;
-	scsi_qla_host_t *vha;
-	struct qla_hw_data *ha;
-	struct scsi_cmnd *cmd;
-	struct scatterlist *cur_seg;
-	uint32_t *dsd_seg;
-	void *next_dsd;
-	uint8_t avail_dsds;
-	uint8_t first_iocb = 1;
-	uint32_t dsd_list_len;
-	struct dsd_dma *dsd_ptr;
-	struct ct6_dsd *ctx;
-
-	cmd = sp->cmd;
-
-	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-		__constant_cpu_to_le32(COMMAND_TYPE_6);
-
-	/* No data transfer */
-	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
-		return 0;
-	}
-
-	vha = sp->fcport->vha;
-	ha = vha->hw;
-
-	/* Set transfer direction */
-	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		cmd_pkt->control_flags =
-			__constant_cpu_to_le16(CF_WRITE_DATA);
-		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
-	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		cmd_pkt->control_flags =
-			__constant_cpu_to_le16(CF_READ_DATA);
-		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
-	}
-
-	cur_seg = scsi_sglist(cmd);
-	ctx = sp->ctx;
-
-	while (tot_dsds) {
-		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
-		    QLA_DSDS_PER_IOCB : tot_dsds;
-		tot_dsds -= avail_dsds;
-		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
-
-		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
-		    struct dsd_dma, list);
-		next_dsd = dsd_ptr->dsd_addr;
-		list_del(&dsd_ptr->list);
-		ha->gbl_dsd_avail--;
-		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
-		ctx->dsd_use_cnt++;
-		ha->gbl_dsd_inuse++;
-
-		if (first_iocb) {
-			first_iocb = 0;
-			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
-			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
-		} else {
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(dsd_list_len);
-		}
-		cur_dsd = (uint32_t *)next_dsd;
-		while (avail_dsds) {
-			dma_addr_t sle_dma;
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			cur_seg = sg_next(cur_seg);
-			avail_dsds--;
-		}
-	}
-
-	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
-	return 0;
-}
-
-/*
- * qla82xx_calc_dsd_lists() - Determine number of DSD list required
- * for Command Type 6.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of dsd list needed to store @dsds.
- */
-inline uint16_t
-qla82xx_calc_dsd_lists(uint16_t dsds)
-{
-	uint16_t dsd_lists = 0;
-
-	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
-	if (dsds % QLA_DSDS_PER_IOCB)
-		dsd_lists++;
-	return dsd_lists;
-}
-
-/*
- * qla82xx_start_scsi() - Send a SCSI command to the ISP
- * @sp: command to send to the ISP
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-int
-qla82xx_start_scsi(srb_t *sp)
-{
-	int ret, nseg;
-	unsigned long flags;
-	struct scsi_cmnd *cmd;
-	uint32_t *clr_ptr;
-	uint32_t index;
-	uint32_t handle;
-	uint16_t cnt;
-	uint16_t req_cnt;
-	uint16_t tot_dsds;
-	struct device_reg_82xx __iomem *reg;
-	uint32_t dbval;
-	uint32_t *fcp_dl;
-	uint8_t additional_cdb_len;
-	struct ct6_dsd *ctx;
-	struct scsi_qla_host *vha = sp->fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = NULL;
-	struct rsp_que *rsp = NULL;
-	char tag[2];
-
-	/* Setup device pointers. */
-	ret = 0;
-	reg = &ha->iobase->isp82;
-	cmd = sp->cmd;
-	req = vha->req;
-	rsp = ha->rsp_q_map[0];
-
-	/* So we know we haven't pci_map'ed anything yet */
-	tot_dsds = 0;
-
-	dbval = 0x04 | (ha->portnum << 5);
-
-	/* Send marker if required */
-	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req,
-			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
-			ql_log(ql_log_warn, vha, 0x300c,
-			    "qla2x00_marker failed for cmd=%p.\n", cmd);
-			return QLA_FUNCTION_FAILED;
-		}
-		vha->marker_needed = 0;
-	}
-
-	/* Acquire ring specific lock */
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
-		handle++;
-		if (handle == MAX_OUTSTANDING_COMMANDS)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == MAX_OUTSTANDING_COMMANDS)
-		goto queuing_error;
-
-	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (scsi_sg_count(cmd)) {
-		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
-		    scsi_sg_count(cmd), cmd->sc_data_direction);
-		if (unlikely(!nseg))
-			goto queuing_error;
-	} else
-		nseg = 0;
-
-	tot_dsds = nseg;
-
-	if (tot_dsds > ql2xshiftctondsd) {
-		struct cmd_type_6 *cmd_pkt;
-		uint16_t more_dsd_lists = 0;
-		struct dsd_dma *dsd_ptr;
-		uint16_t i;
-
-		more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
-		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
-			ql_dbg(ql_dbg_io, vha, 0x300d,
-			    "Num of DSD list %d is than %d for cmd=%p.\n",
-			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
-			    cmd);
-			goto queuing_error;
-		}
-
-		if (more_dsd_lists <= ha->gbl_dsd_avail)
-			goto sufficient_dsds;
-		else
-			more_dsd_lists -= ha->gbl_dsd_avail;
-
-		for (i = 0; i < more_dsd_lists; i++) {
-			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-			if (!dsd_ptr) {
-				ql_log(ql_log_fatal, vha, 0x300e,
-				    "Failed to allocate memory for dsd_dma "
-				    "for cmd=%p.\n", cmd);
-				goto queuing_error;
-			}
-
-			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
-				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
-			if (!dsd_ptr->dsd_addr) {
-				kfree(dsd_ptr);
-				ql_log(ql_log_fatal, vha, 0x300f,
-				    "Failed to allocate memory for dsd_addr "
-				    "for cmd=%p.\n", cmd);
-				goto queuing_error;
-			}
-			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
-			ha->gbl_dsd_avail++;
-		}
-
-sufficient_dsds:
-		req_cnt = 1;
-
-		if (req->cnt < (req_cnt + 2)) {
-			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-				&reg->req_q_out[0]);
-			if (req->ring_index < cnt)
-				req->cnt = cnt - req->ring_index;
-			else
-				req->cnt = req->length -
-					(req->ring_index - cnt);
-		}
-
-		if (req->cnt < (req_cnt + 2))
-			goto queuing_error;
-
-		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
-		if (!sp->ctx) {
-			ql_log(ql_log_fatal, vha, 0x3010,
-			    "Failed to allocate ctx for cmd=%p.\n", cmd);
-			goto queuing_error;
-		}
-		memset(ctx, 0, sizeof(struct ct6_dsd));
-		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
-			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
-		if (!ctx->fcp_cmnd) {
-			ql_log(ql_log_fatal, vha, 0x3011,
-			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-			goto queuing_error_fcp_cmnd;
-		}
-
-		/* Initialize the DSD list and dma handle */
-		INIT_LIST_HEAD(&ctx->dsd_list);
-		ctx->dsd_use_cnt = 0;
-
-		if (cmd->cmd_len > 16) {
-			additional_cdb_len = cmd->cmd_len - 16;
-			if ((cmd->cmd_len % 4) != 0) {
-				/* SCSI command bigger than 16 bytes must be
-				 * multiple of 4
-				 */
-				ql_log(ql_log_warn, vha, 0x3012,
-				    "scsi cmd len %d not multiple of 4 "
-				    "for cmd=%p.\n", cmd->cmd_len, cmd);
-				goto queuing_error_fcp_cmnd;
-			}
-			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
-		} else {
-			additional_cdb_len = 0;
-			ctx->fcp_cmnd_len = 12 + 16 + 4;
-		}
-
-		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
-		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-		/* Zero out remaining portion of packet. */
-		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
-		clr_ptr = (uint32_t *)cmd_pkt + 2;
-		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-		/* Set NPORT-ID and LUN number*/
-		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-		cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-		/* Build IOCB segments */
-		if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
-			goto queuing_error_fcp_cmnd;
-
-		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
-		/* build FCP_CMND IU */
-		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
-		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
-		if (cmd->sc_data_direction == DMA_TO_DEVICE)
-			ctx->fcp_cmnd->additional_cdb_len |= 1;
-		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-			ctx->fcp_cmnd->additional_cdb_len |= 2;
-
-		/*
-		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-		 */
-		if (scsi_populate_tag_msg(cmd, tag)) {
-			switch (tag[0]) {
-			case HEAD_OF_QUEUE_TAG:
-				ctx->fcp_cmnd->task_attribute =
-				    TSK_HEAD_OF_QUEUE;
-				break;
-			case ORDERED_QUEUE_TAG:
-				ctx->fcp_cmnd->task_attribute =
-				    TSK_ORDERED;
-				break;
-			}
-		}
-
-		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
-		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
-		    additional_cdb_len);
-		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
-		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-		cmd_pkt->fcp_cmnd_dseg_address[0] =
-		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-		cmd_pkt->fcp_cmnd_dseg_address[1] =
-		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
-		sp->flags |= SRB_FCP_CMND_DMA_VALID;
-		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-		/* Set total data segment count. */
-		cmd_pkt->entry_count = (uint8_t)req_cnt;
-		/* Specify response queue number where
-		 * completion should happen
-		 */
-		cmd_pkt->entry_status = (uint8_t) rsp->id;
-	} else {
-		struct cmd_type_7 *cmd_pkt;
-		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
-		if (req->cnt < (req_cnt + 2)) {
-			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-			    &reg->req_q_out[0]);
-			if (req->ring_index < cnt)
-				req->cnt = cnt - req->ring_index;
-			else
-				req->cnt = req->length -
-					(req->ring_index - cnt);
-		}
-		if (req->cnt < (req_cnt + 2))
-			goto queuing_error;
-
-		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-		/* Zero out remaining portion of packet. */
-		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
-		clr_ptr = (uint32_t *)cmd_pkt + 2;
-		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-		/* Set NPORT-ID and LUN number*/
-		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-		cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
-		    sizeof(cmd_pkt->lun));
-
-		/*
-		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-		 */
-		if (scsi_populate_tag_msg(cmd, tag)) {
-			switch (tag[0]) {
-			case HEAD_OF_QUEUE_TAG:
-				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
-				break;
-			case ORDERED_QUEUE_TAG:
-				cmd_pkt->task = TSK_ORDERED;
-				break;
-			}
-		}
-
-		/* Load SCSI command packet. */
-		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
-		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
-		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
-		/* Build IOCB segments */
-		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
-		/* Set total data segment count. */
-		cmd_pkt->entry_count = (uint8_t)req_cnt;
-		/* Specify response queue number where
-		 * completion should happen.
-		 */
-		cmd_pkt->entry_status = (uint8_t) rsp->id;
-
-	}
-	/* Build command packet. */
-	req->current_outstanding_cmd = handle;
-	req->outstanding_cmds[handle] = sp;
-	sp->handle = handle;
-	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-	req->cnt -= req_cnt;
-	wmb();
-
-	/* Adjust ring index. */
-	req->ring_index++;
-	if (req->ring_index == req->length) {
-		req->ring_index = 0;
-		req->ring_ptr = req->ring;
-	} else
-		req->ring_ptr++;
-
-	sp->flags |= SRB_DMA_VALID;
-
-	/* Set chip new ring index. */
-	/* write, read and verify logic */
-	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-	if (ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-	else {
-		WRT_REG_DWORD(
-			(unsigned long __iomem *)ha->nxdb_wr_ptr,
-			dbval);
-		wmb();
-		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD(
-				(unsigned long __iomem *)ha->nxdb_wr_ptr,
-				dbval);
-			wmb();
-		}
-	}
-
-	/* Manage unprocessed RIO/ZIO commands in response queue. */
-	if (vha->flags.process_response_queue &&
-	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-		qla24xx_process_response_queue(vha, rsp);
-
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
-	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
-	if (tot_dsds)
-		scsi_dma_unmap(cmd);
-
-	if (sp->ctx) {
-		mempool_free(sp->ctx, ha->ctx_mempool);
-		sp->ctx = NULL;
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	return QLA_FUNCTION_FAILED;
-}
-
 static uint32_t *
 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 	uint32_t length)
@@ -3268,9 +2790,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 }
 
 void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
 {
-	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 	struct device_reg_82xx __iomem *reg;
 	uint32_t dbval;