aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla2xxx/qla_iocb.c
diff options
context:
space:
mode:
authorSaurav Kashyap <saurav.kashyap@qlogic.com>2012-08-22 14:21:01 -0400
committerJames Bottomley <JBottomley@Parallels.com>2012-09-24 04:10:47 -0400
commita9b6f722f62d0a302b980a4fdcdf9c9933955772 (patch)
treed353225c380d8183faddbf535a6116ada1a41279 /drivers/scsi/qla2xxx/qla_iocb.c
parent5f16b331d83757ad5154af07b449c722fef45d5e (diff)
[SCSI] qla2xxx: Implementation of bidirectional.
[jejb: merge fix for introduced warning] Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com> Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c198
1 file changed, 198 insertions, 0 deletions
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 23e83757fa5d..74c69ba39a04 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2665,3 +2665,201 @@ done:
2665 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2665 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666 return rval; 2666 return rval;
2667} 2667}
2668
2669static void
2670qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2671 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2672{
2673 uint16_t avail_dsds;
2674 uint32_t *cur_dsd;
2675 uint32_t req_data_len = 0;
2676 uint32_t rsp_data_len = 0;
2677 struct scatterlist *sg;
2678 int index;
2679 int entry_count = 1;
2680 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2681
2682 /*Update entry type to indicate bidir command */
2683 *((uint32_t *)(&cmd_pkt->entry_type)) =
2684 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2685
2686 /* Set the transfer direction, in this set both flags
2687 * Also set the BD_WRAP_BACK flag, firmware will take care
2688 * assigning DID=SID for outgoing pkts.
2689 */
2690 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2691 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2692 cmd_pkt->control_flags =
2693 __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2694 BD_WRAP_BACK);
2695
2696 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2697 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2698 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2699 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2700
2701 vha->bidi_stats.transfer_bytes += req_data_len;
2702 vha->bidi_stats.io_count++;
2703
2704 /* Only one dsd is available for bidirectional IOCB, remaining dsds
2705 * are bundled in continuation iocb
2706 */
2707 avail_dsds = 1;
2708 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2709
2710 index = 0;
2711
2712 for_each_sg(bsg_job->request_payload.sg_list, sg,
2713 bsg_job->request_payload.sg_cnt, index) {
2714 dma_addr_t sle_dma;
2715 cont_a64_entry_t *cont_pkt;
2716
2717 /* Allocate additional continuation packets */
2718 if (avail_dsds == 0) {
2719 /* Continuation type 1 IOCB can accomodate
2720 * 5 DSDS
2721 */
2722 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2723 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2724 avail_dsds = 5;
2725 entry_count++;
2726 }
2727 sle_dma = sg_dma_address(sg);
2728 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2729 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2730 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2731 avail_dsds--;
2732 }
2733 /* For read request DSD will always goes to continuation IOCB
2734 * and follow the write DSD. If there is room on the current IOCB
2735 * then it is added to that IOCB else new continuation IOCB is
2736 * allocated.
2737 */
2738 for_each_sg(bsg_job->reply_payload.sg_list, sg,
2739 bsg_job->reply_payload.sg_cnt, index) {
2740 dma_addr_t sle_dma;
2741 cont_a64_entry_t *cont_pkt;
2742
2743 /* Allocate additional continuation packets */
2744 if (avail_dsds == 0) {
2745 /* Continuation type 1 IOCB can accomodate
2746 * 5 DSDS
2747 */
2748 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2749 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2750 avail_dsds = 5;
2751 entry_count++;
2752 }
2753 sle_dma = sg_dma_address(sg);
2754 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2755 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2756 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2757 avail_dsds--;
2758 }
2759 /* This value should be same as number of IOCB required for this cmd */
2760 cmd_pkt->entry_count = entry_count;
2761}
2762
2763int
2764qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2765{
2766
2767 struct qla_hw_data *ha = vha->hw;
2768 unsigned long flags;
2769 uint32_t handle;
2770 uint32_t index;
2771 uint16_t req_cnt;
2772 uint16_t cnt;
2773 uint32_t *clr_ptr;
2774 struct cmd_bidir *cmd_pkt = NULL;
2775 struct rsp_que *rsp;
2776 struct req_que *req;
2777 int rval = EXT_STATUS_OK;
2778 device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
2779
2780 rval = QLA_SUCCESS;
2781
2782 rsp = ha->rsp_q_map[0];
2783 req = vha->req;
2784
2785 /* Send marker if required */
2786 if (vha->marker_needed != 0) {
2787 if (qla2x00_marker(vha, req,
2788 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2789 return EXT_STATUS_MAILBOX;
2790 vha->marker_needed = 0;
2791 }
2792
2793 /* Acquire ring specific lock */
2794 spin_lock_irqsave(&ha->hardware_lock, flags);
2795
2796 /* Check for room in outstanding command list. */
2797 handle = req->current_outstanding_cmd;
2798 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2799 handle++;
2800 if (handle == MAX_OUTSTANDING_COMMANDS)
2801 handle = 1;
2802 if (!req->outstanding_cmds[handle])
2803 break;
2804 }
2805
2806 if (index == MAX_OUTSTANDING_COMMANDS) {
2807 rval = EXT_STATUS_BUSY;
2808 goto queuing_error;
2809 }
2810
2811 /* Calculate number of IOCB required */
2812 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2813
2814 /* Check for room on request queue. */
2815 if (req->cnt < req_cnt + 2) {
2816 if (ha->mqenable)
2817 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2818 else if (IS_QLA82XX(ha))
2819 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2820 else if (IS_FWI2_CAPABLE(ha))
2821 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2822 else
2823 cnt = qla2x00_debounce_register(
2824 ISP_REQ_Q_OUT(ha, &reg->isp));
2825
2826 if (req->ring_index < cnt)
2827 req->cnt = cnt - req->ring_index;
2828 else
2829 req->cnt = req->length -
2830 (req->ring_index - cnt);
2831 }
2832 if (req->cnt < req_cnt + 2) {
2833 rval = EXT_STATUS_BUSY;
2834 goto queuing_error;
2835 }
2836
2837 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2838 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2839
2840 /* Zero out remaining portion of packet. */
2841 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2842 clr_ptr = (uint32_t *)cmd_pkt + 2;
2843 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2844
2845 /* Set NPORT-ID (of vha)*/
2846 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2847 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2848 cmd_pkt->port_id[1] = vha->d_id.b.area;
2849 cmd_pkt->port_id[2] = vha->d_id.b.domain;
2850
2851 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2852 cmd_pkt->entry_status = (uint8_t) rsp->id;
2853 /* Build command packet. */
2854 req->current_outstanding_cmd = handle;
2855 req->outstanding_cmds[handle] = sp;
2856 sp->handle = handle;
2857 req->cnt -= req_cnt;
2858
2859 /* Send the command to the firmware */
2860 wmb();
2861 qla2x00_start_iocbs(vha, req);
2862queuing_error:
2863 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2864 return rval;
2865}