aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/bnx2fc/bnx2fc_hwi.c
diff options
context:
space:
mode:
authorVlad Zolotarov <vladz@broadcom.com>2011-06-14 07:33:44 -0400
committerDavid S. Miller <davem@conan.davemloft.net>2011-06-15 10:56:37 -0400
commit619c5cb6885b936c44ae1422ef805b69c6291485 (patch)
tree4604ae08f1eb12c6ad1f65106879c2e73946ae12 /drivers/scsi/bnx2fc/bnx2fc_hwi.c
parent042181f5aa8833a8918e1a91cfaf292146ffc62c (diff)
New 7.0 FW: bnx2x, cnic, bnx2i, bnx2fc
New FW/HSI (7.0): - Added support to 578xx chips - Improved HSI - much less driver's direct access to the FW internal memory needed. New implementation of the HSI handling layer in the bnx2x (bnx2x_sp.c): - Introduced chip dependent objects that have chip independent interfaces for configuration of MACs, multicast addresses, Rx mode, indirection table, fast path queues and function initialization/cleanup. - Objects functionality is based on the private function pointers, which allows not only a per-chip but also PF/VF differentiation while still preserving the same interface towards the driver. - Objects interface is not influenced by the HSI changes which do not require providing new parameters keeping the code outside the bnx2x_sp.c invariant with regard to such HSI changes. Changes in a CNIC, bnx2fc and bnx2i modules due to the new HSI. Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com> Signed-off-by: Michael Chan <mchan@broadcom.com> Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com> Signed-off-by: Eilon Greenstein <eilong@broadcom.com> Signed-off-by: David S. Miller <davem@conan.davemloft.net>
Diffstat (limited to 'drivers/scsi/bnx2fc/bnx2fc_hwi.c')
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c442
1 files changed, 237 insertions, 205 deletions
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index f756d5f85c7a..d8e8a825560d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -100,6 +100,9 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
100 fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << 100 fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
101 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 101 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
102 102
103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
105
103 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; 106 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
104 fcoe_init2.hash_tbl_pbl_addr_hi = (u32) 107 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
105 ((u64) hba->hash_tbl_pbl_dma >> 32); 108 ((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -122,6 +125,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
122 fcoe_init3.error_bit_map_lo = 0xffffffff; 125 fcoe_init3.error_bit_map_lo = 0xffffffff;
123 fcoe_init3.error_bit_map_hi = 0xffffffff; 126 fcoe_init3.error_bit_map_hi = 0xffffffff;
124 127
128 fcoe_init3.perf_config = 1;
125 129
126 kwqe_arr[0] = (struct kwqe *) &fcoe_init1; 130 kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
127 kwqe_arr[1] = (struct kwqe *) &fcoe_init2; 131 kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
@@ -289,19 +293,19 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
289 ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; 293 ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
290 294
291 295
292 ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5]; 296 ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
293 /* local mac */ 297 /* local mac */
294 ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4]; 298 ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
295 ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3]; 299 ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
296 ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2]; 300 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
297 ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1]; 301 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
298 ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0]; 302 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
299 ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 303 ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
300 ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; 304 ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
301 ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; 305 ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
302 ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; 306 ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
303 ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; 307 ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
304 ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; 308 ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
305 309
306 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 310 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
307 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 311 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -345,20 +349,21 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
345 enbl_req.hdr.flags = 349 enbl_req.hdr.flags =
346 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 350 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
347 351
348 enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; 352 enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
349 /* local mac */ 353 /* local mac */
350 enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4]; 354 enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
351 enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; 355 enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
352 enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; 356 enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
353 enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; 357 enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
354 enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; 358 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
355 359 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
356 enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 360
357 enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; 361 enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
358 enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; 362 enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
359 enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; 363 enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
360 enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; 364 enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
361 enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; 365 enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
366 enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
362 367
363 port_id = fc_host_port_id(lport->host); 368 port_id = fc_host_port_id(lport->host);
364 if (port_id != tgt->sid) { 369 if (port_id != tgt->sid) {
@@ -411,18 +416,19 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
411 disable_req.hdr.flags = 416 disable_req.hdr.flags =
412 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 417 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
413 418
414 disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; 419 disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
415 disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; 420 disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
416 disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; 421 disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
417 disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; 422 disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
418 disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; 423 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
424 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
419 425
420 disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 426 disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
421 disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; 427 disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
422 disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; 428 disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
423 disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; 429 disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
424 disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; 430 disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
425 disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; 431 disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
426 432
427 port_id = tgt->sid; 433 port_id = tgt->sid;
428 disable_req.s_id[0] = (port_id & 0x000000FF); 434 disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -640,10 +646,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
640 xid = err_entry->fc_hdr.ox_id; 646 xid = err_entry->fc_hdr.ox_id;
641 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); 647 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
642 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", 648 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
643 err_entry->err_warn_bitmap_hi, 649 err_entry->data.err_warn_bitmap_hi,
644 err_entry->err_warn_bitmap_lo); 650 err_entry->data.err_warn_bitmap_lo);
645 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", 651 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
646 err_entry->tx_buf_off, err_entry->rx_buf_off); 652 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
647 653
648 bnx2fc_return_rqe(tgt, 1); 654 bnx2fc_return_rqe(tgt, 1);
649 655
@@ -722,10 +728,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
722 xid = cpu_to_be16(err_entry->fc_hdr.ox_id); 728 xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
723 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); 729 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
724 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", 730 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
725 err_entry->err_warn_bitmap_hi, 731 err_entry->data.err_warn_bitmap_hi,
726 err_entry->err_warn_bitmap_lo); 732 err_entry->data.err_warn_bitmap_lo);
727 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", 733 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
728 err_entry->tx_buf_off, err_entry->rx_buf_off); 734 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
729 735
730 bnx2fc_return_rqe(tgt, 1); 736 bnx2fc_return_rqe(tgt, 1);
731 spin_unlock_bh(&tgt->tgt_lock); 737 spin_unlock_bh(&tgt->tgt_lock);
@@ -762,9 +768,9 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
762 task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; 768 task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
763 task = &(task_page[index]); 769 task = &(task_page[index]);
764 770
765 num_rq = ((task->rx_wr_tx_rd.rx_flags & 771 num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
766 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >> 772 FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
767 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT); 773 FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
768 774
769 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; 775 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
770 776
@@ -777,22 +783,19 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
777 /* Timestamp IO completion time */ 783 /* Timestamp IO completion time */
778 cmd_type = io_req->cmd_type; 784 cmd_type = io_req->cmd_type;
779 785
780 /* optimized completion path */ 786 rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
781 if (cmd_type == BNX2FC_SCSI_CMD) { 787 FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
782 rx_state = ((task->rx_wr_tx_rd.rx_flags & 788 FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
783 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
784 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
785 789
790 /* Process other IO completion types */
791 switch (cmd_type) {
792 case BNX2FC_SCSI_CMD:
786 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { 793 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
787 bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); 794 bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
788 spin_unlock_bh(&tgt->tgt_lock); 795 spin_unlock_bh(&tgt->tgt_lock);
789 return; 796 return;
790 } 797 }
791 }
792 798
793 /* Process other IO completion types */
794 switch (cmd_type) {
795 case BNX2FC_SCSI_CMD:
796 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) 799 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
797 bnx2fc_process_abts_compl(io_req, task, num_rq); 800 bnx2fc_process_abts_compl(io_req, task, num_rq);
798 else if (rx_state == 801 else if (rx_state ==
@@ -819,8 +822,16 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
819 break; 822 break;
820 823
821 case BNX2FC_ELS: 824 case BNX2FC_ELS:
822 BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n"); 825 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
823 bnx2fc_process_els_compl(io_req, task, num_rq); 826 bnx2fc_process_els_compl(io_req, task, num_rq);
827 else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
828 bnx2fc_process_abts_compl(io_req, task, num_rq);
829 else if (rx_state ==
830 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
831 bnx2fc_process_cleanup_compl(io_req, task, num_rq);
832 else
833 printk(KERN_ERR PFX "Invalid rx state = %d\n",
834 rx_state);
824 break; 835 break;
825 836
826 case BNX2FC_CLEANUP: 837 case BNX2FC_CLEANUP:
@@ -835,6 +846,20 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
835 spin_unlock_bh(&tgt->tgt_lock); 846 spin_unlock_bh(&tgt->tgt_lock);
836} 847}
837 848
849void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
850{
851 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
852 u32 msg;
853
854 wmb();
855 rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
856 FCOE_CQE_TOGGLE_BIT_SHIFT);
857 msg = *((u32 *)rx_db);
858 writel(cpu_to_le32(msg), tgt->ctx_base);
859 mmiowb();
860
861}
862
838struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) 863struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
839{ 864{
840 struct bnx2fc_work *work; 865 struct bnx2fc_work *work;
@@ -853,8 +878,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
853 struct fcoe_cqe *cq; 878 struct fcoe_cqe *cq;
854 u32 cq_cons; 879 u32 cq_cons;
855 struct fcoe_cqe *cqe; 880 struct fcoe_cqe *cqe;
881 u32 num_free_sqes = 0;
856 u16 wqe; 882 u16 wqe;
857 bool more_cqes_found = false;
858 883
859 /* 884 /*
860 * cq_lock is a low contention lock used to protect 885 * cq_lock is a low contention lock used to protect
@@ -872,62 +897,51 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
872 cq_cons = tgt->cq_cons_idx; 897 cq_cons = tgt->cq_cons_idx;
873 cqe = &cq[cq_cons]; 898 cqe = &cq[cq_cons];
874 899
875 do { 900 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
876 more_cqes_found ^= true; 901 (tgt->cq_curr_toggle_bit <<
877 902 FCOE_CQE_TOGGLE_BIT_SHIFT)) {
878 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
879 (tgt->cq_curr_toggle_bit <<
880 FCOE_CQE_TOGGLE_BIT_SHIFT)) {
881 903
882 /* new entry on the cq */ 904 /* new entry on the cq */
883 if (wqe & FCOE_CQE_CQE_TYPE) { 905 if (wqe & FCOE_CQE_CQE_TYPE) {
884 /* Unsolicited event notification */ 906 /* Unsolicited event notification */
885 bnx2fc_process_unsol_compl(tgt, wqe); 907 bnx2fc_process_unsol_compl(tgt, wqe);
886 } else { 908 } else {
887 struct bnx2fc_work *work = NULL; 909 /* Pending work request completion */
888 struct bnx2fc_percpu_s *fps = NULL; 910 struct bnx2fc_work *work = NULL;
889 unsigned int cpu = wqe % num_possible_cpus(); 911 struct bnx2fc_percpu_s *fps = NULL;
890 912 unsigned int cpu = wqe % num_possible_cpus();
891 fps = &per_cpu(bnx2fc_percpu, cpu); 913
892 spin_lock_bh(&fps->fp_work_lock); 914 fps = &per_cpu(bnx2fc_percpu, cpu);
893 if (unlikely(!fps->iothread)) 915 spin_lock_bh(&fps->fp_work_lock);
894 goto unlock; 916 if (unlikely(!fps->iothread))
895 917 goto unlock;
896 work = bnx2fc_alloc_work(tgt, wqe); 918
897 if (work) 919 work = bnx2fc_alloc_work(tgt, wqe);
898 list_add_tail(&work->list, 920 if (work)
899 &fps->work_list); 921 list_add_tail(&work->list,
922 &fps->work_list);
900unlock: 923unlock:
901 spin_unlock_bh(&fps->fp_work_lock); 924 spin_unlock_bh(&fps->fp_work_lock);
902 925
903 /* Pending work request completion */ 926 /* Pending work request completion */
904 if (fps->iothread && work) 927 if (fps->iothread && work)
905 wake_up_process(fps->iothread); 928 wake_up_process(fps->iothread);
906 else 929 else
907 bnx2fc_process_cq_compl(tgt, wqe); 930 bnx2fc_process_cq_compl(tgt, wqe);
908 }
909 cqe++;
910 tgt->cq_cons_idx++;
911
912 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
913 tgt->cq_cons_idx = 0;
914 cqe = cq;
915 tgt->cq_curr_toggle_bit =
916 1 - tgt->cq_curr_toggle_bit;
917 }
918 } 931 }
919 /* Re-arm CQ */ 932 cqe++;
920 if (more_cqes_found) { 933 tgt->cq_cons_idx++;
921 tgt->conn_db->cq_arm.lo = -1; 934 num_free_sqes++;
922 wmb(); 935
936 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
937 tgt->cq_cons_idx = 0;
938 cqe = cq;
939 tgt->cq_curr_toggle_bit =
940 1 - tgt->cq_curr_toggle_bit;
923 } 941 }
924 } while (more_cqes_found); 942 }
925 943 bnx2fc_arm_cq(tgt);
926 /* 944 atomic_add(num_free_sqes, &tgt->free_sqes);
927 * Commit tgt->cq_cons_idx change to the memory
928 * spin_lock implies full memory barrier, no need to smp_wmb
929 */
930
931 spin_unlock_bh(&tgt->cq_lock); 945 spin_unlock_bh(&tgt->cq_lock);
932 return 0; 946 return 0;
933} 947}
@@ -1141,7 +1155,11 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1141 case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: 1155 case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1142 printk(KERN_ERR PFX "init_failure due to NIC error\n"); 1156 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1143 break; 1157 break;
1144 1158 case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1159 printk(KERN_ERR PFX "init failure due to compl status err\n");
1160 break;
1161 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1162 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1145 default: 1163 default:
1146 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); 1164 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1147 } 1165 }
@@ -1247,21 +1265,14 @@ void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1247 1265
1248void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) 1266void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1249{ 1267{
1250 struct b577xx_doorbell_set_prod ev_doorbell; 1268 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1251 u32 msg; 1269 u32 msg;
1252 1270
1253 wmb(); 1271 wmb();
1254 1272 sq_db->prod = tgt->sq_prod_idx |
1255 memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
1256 ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
1257
1258 ev_doorbell.prod = tgt->sq_prod_idx |
1259 (tgt->sq_curr_toggle_bit << 15); 1273 (tgt->sq_curr_toggle_bit << 15);
1260 ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE << 1274 msg = *((u32 *)sq_db);
1261 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
1262 msg = *((u32 *)&ev_doorbell);
1263 writel(cpu_to_le32(msg), tgt->ctx_base); 1275 writel(cpu_to_le32(msg), tgt->ctx_base);
1264
1265 mmiowb(); 1276 mmiowb();
1266 1277
1267} 1278}
@@ -1322,18 +1333,26 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1322 memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); 1333 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1323 1334
1324 /* Tx Write Rx Read */ 1335 /* Tx Write Rx Read */
1325 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << 1336 /* init flags */
1326 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; 1337 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1327 task->tx_wr_rx_rd.init_flags = task_type << 1338 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1328 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; 1339 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1329 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1340 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1330 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; 1341 task->txwr_rxrd.const_ctx.init_flags |=
1331 /* Common */ 1342 FCOE_TASK_DEV_TYPE_DISK <<
1332 task->cmn.common_flags = context_id << 1343 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1333 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; 1344 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1334 task->cmn.general.cleanup_info.task_id = orig_xid; 1345
1335 1346 /* Tx flags */
1336 1347 task->txwr_rxrd.const_ctx.tx_flags =
1348 FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1349 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1350
1351 /* Rx Read Tx Write */
1352 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1353 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1354 task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1355 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1337} 1356}
1338 1357
1339void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, 1358void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
@@ -1342,6 +1361,7 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1342 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); 1361 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1343 struct bnx2fc_rport *tgt = io_req->tgt; 1362 struct bnx2fc_rport *tgt = io_req->tgt;
1344 struct fc_frame_header *fc_hdr; 1363 struct fc_frame_header *fc_hdr;
1364 struct fcoe_ext_mul_sges_ctx *sgl;
1345 u8 task_type = 0; 1365 u8 task_type = 0;
1346 u64 *hdr; 1366 u64 *hdr;
1347 u64 temp_hdr[3]; 1367 u64 temp_hdr[3];
@@ -1367,47 +1387,49 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1367 /* Tx only */ 1387 /* Tx only */
1368 if ((task_type == FCOE_TASK_TYPE_MIDPATH) || 1388 if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1369 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { 1389 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1370 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = 1390 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1371 (u32)mp_req->mp_req_bd_dma; 1391 (u32)mp_req->mp_req_bd_dma;
1372 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = 1392 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1373 (u32)((u64)mp_req->mp_req_bd_dma >> 32); 1393 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1374 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; 1394 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1375 BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
1376 (unsigned long long)mp_req->mp_req_bd_dma);
1377 } 1395 }
1378 1396
1379 /* Tx Write Rx Read */ 1397 /* Tx Write Rx Read */
1380 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT << 1398 /* init flags */
1381 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; 1399 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1382 task->tx_wr_rx_rd.init_flags = task_type << 1400 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1383 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; 1401 task->txwr_rxrd.const_ctx.init_flags |=
1384 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << 1402 FCOE_TASK_DEV_TYPE_DISK <<
1385 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; 1403 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1386 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1404 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1387 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; 1405 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1388 1406
1389 /* Common */ 1407 /* tx flags */
1390 task->cmn.data_2_trns = io_req->data_xfer_len; 1408 task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1391 context_id = tgt->context_id; 1409 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1392 task->cmn.common_flags = context_id <<
1393 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1394 task->cmn.common_flags |= 1 <<
1395 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1396 task->cmn.common_flags |= 1 <<
1397 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1398 1410
1399 /* Rx Write Tx Read */ 1411 /* Rx Write Tx Read */
1412 task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1413
1414 /* rx flags */
1415 task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1416 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1417
1418 context_id = tgt->context_id;
1419 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1420 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1421
1400 fc_hdr = &(mp_req->req_fc_hdr); 1422 fc_hdr = &(mp_req->req_fc_hdr);
1401 if (task_type == FCOE_TASK_TYPE_MIDPATH) { 1423 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1402 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); 1424 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1403 fc_hdr->fh_rx_id = htons(0xffff); 1425 fc_hdr->fh_rx_id = htons(0xffff);
1404 task->rx_wr_tx_rd.rx_id = 0xffff; 1426 task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1405 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { 1427 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1406 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); 1428 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1407 } 1429 }
1408 1430
1409 /* Fill FC Header into middle path buffer */ 1431 /* Fill FC Header into middle path buffer */
1410 hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; 1432 hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1411 memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); 1433 memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1412 hdr[0] = cpu_to_be64(temp_hdr[0]); 1434 hdr[0] = cpu_to_be64(temp_hdr[0]);
1413 hdr[1] = cpu_to_be64(temp_hdr[1]); 1435 hdr[1] = cpu_to_be64(temp_hdr[1]);
@@ -1415,12 +1437,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1415 1437
1416 /* Rx Only */ 1438 /* Rx Only */
1417 if (task_type == FCOE_TASK_TYPE_MIDPATH) { 1439 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1440 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1418 1441
1419 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = 1442 sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1420 (u32)mp_req->mp_resp_bd_dma; 1443 sgl->mul_sgl.cur_sge_addr.hi =
1421 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1422 (u32)((u64)mp_req->mp_resp_bd_dma >> 32); 1444 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1423 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; 1445 sgl->mul_sgl.sgl_size = 1;
1424 } 1446 }
1425} 1447}
1426 1448
@@ -1431,6 +1453,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1431 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1453 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1432 struct io_bdt *bd_tbl = io_req->bd_tbl; 1454 struct io_bdt *bd_tbl = io_req->bd_tbl;
1433 struct bnx2fc_rport *tgt = io_req->tgt; 1455 struct bnx2fc_rport *tgt = io_req->tgt;
1456 struct fcoe_cached_sge_ctx *cached_sge;
1457 struct fcoe_ext_mul_sges_ctx *sgl;
1434 u64 *fcp_cmnd; 1458 u64 *fcp_cmnd;
1435 u64 tmp_fcp_cmnd[4]; 1459 u64 tmp_fcp_cmnd[4];
1436 u32 context_id; 1460 u32 context_id;
@@ -1449,47 +1473,33 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1449 1473
1450 /* Tx only */ 1474 /* Tx only */
1451 if (task_type == FCOE_TASK_TYPE_WRITE) { 1475 if (task_type == FCOE_TASK_TYPE_WRITE) {
1452 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = 1476 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1453 (u32)bd_tbl->bd_tbl_dma; 1477 (u32)bd_tbl->bd_tbl_dma;
1454 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = 1478 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1455 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1479 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1456 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1480 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1457 bd_tbl->bd_valid; 1481 bd_tbl->bd_valid;
1458 } 1482 }
1459 1483
1460 /*Tx Write Rx Read */ 1484 /*Tx Write Rx Read */
1461 /* Init state to NORMAL */ 1485 /* Init state to NORMAL */
1462 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL << 1486 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1463 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; 1487 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1464 task->tx_wr_rx_rd.init_flags = task_type << 1488 task->txwr_rxrd.const_ctx.init_flags |=
1465 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; 1489 FCOE_TASK_DEV_TYPE_DISK <<
1466 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << 1490 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1467 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; 1491 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1468 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1492 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1469 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; 1493 /* tx flags */
1470 1494 task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1471 /* Common */ 1495 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1472 task->cmn.data_2_trns = io_req->data_xfer_len;
1473 context_id = tgt->context_id;
1474 task->cmn.common_flags = context_id <<
1475 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1476 task->cmn.common_flags |= 1 <<
1477 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1478 task->cmn.common_flags |= 1 <<
1479 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1480
1481 /* Set initiative ownership */
1482 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
1483 1496
1484 /* Set initial seq counter */ 1497 /* Set initial seq counter */
1485 task->cmn.tx_low_seq_cnt = 1; 1498 task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1486
1487 /* Set state to "waiting for the first packet" */
1488 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
1489 1499
1490 /* Fill FCP_CMND IU */ 1500 /* Fill FCP_CMND IU */
1491 fcp_cmnd = (u64 *) 1501 fcp_cmnd = (u64 *)
1492 task->cmn.general.cmd_info.fcp_cmd_payload.opaque; 1502 task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1493 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); 1503 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1494 1504
1495 /* swap fcp_cmnd */ 1505 /* swap fcp_cmnd */
@@ -1501,32 +1511,54 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1501 } 1511 }
1502 1512
1503 /* Rx Write Tx Read */ 1513 /* Rx Write Tx Read */
1504 task->rx_wr_tx_rd.rx_id = 0xffff; 1514 task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1515
1516 context_id = tgt->context_id;
1517 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1518 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1519
1520 /* rx flags */
1521 /* Set state to "waiting for the first packet" */
1522 task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1523 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1524
1525 task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1505 1526
1506 /* Rx Only */ 1527 /* Rx Only */
1528 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1529 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1530 bd_count = bd_tbl->bd_valid;
1507 if (task_type == FCOE_TASK_TYPE_READ) { 1531 if (task_type == FCOE_TASK_TYPE_READ) {
1508
1509 bd_count = bd_tbl->bd_valid;
1510 if (bd_count == 1) { 1532 if (bd_count == 1) {
1511 1533
1512 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; 1534 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1513 1535
1514 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo = 1536 cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1515 fcoe_bd_tbl->buf_addr_lo; 1537 cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1516 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi = 1538 cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1517 fcoe_bd_tbl->buf_addr_hi; 1539 task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1518 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem = 1540 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1519 fcoe_bd_tbl->buf_len; 1541 } else if (bd_count == 2) {
1520 task->tx_wr_rx_rd.init_flags |= 1 << 1542 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1521 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT; 1543
1544 cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1545 cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1546 cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1547
1548 fcoe_bd_tbl++;
1549 cached_sge->second_buf_addr.lo =
1550 fcoe_bd_tbl->buf_addr_lo;
1551 cached_sge->second_buf_addr.hi =
1552 fcoe_bd_tbl->buf_addr_hi;
1553 cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1554 task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1555 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1522 } else { 1556 } else {
1523 1557
1524 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = 1558 sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1525 (u32)bd_tbl->bd_tbl_dma; 1559 sgl->mul_sgl.cur_sge_addr.hi =
1526 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1527 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1560 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1528 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1561 sgl->mul_sgl.sgl_size = bd_count;
1529 bd_tbl->bd_valid;
1530 } 1562 }
1531 } 1563 }
1532} 1564}