aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorJes Sorensen <jes@sgi.com>2007-11-09 08:40:41 -0500
committerJames Bottomley <James.Bottomley@HansenPartnership.com>2007-11-14 19:25:44 -0500
commit5c1da582b3a95123ffb1e70ec7cd60e757c7c8c2 (patch)
tree33f09734c4e8f8e2ee9a5abb5021c412a96bd911 /drivers
parent6ee6a2f0258c064bbc64ad97dc195063457ebebe (diff)
[SCSI] qla1280: convert to use the data buffer accessors
- remove the unnecessary map_single path. - convert to use the new accessors for the sg lists and the parameters. Fixed the missing initialization of sg lists before calling for_each_sg() by Jes Sorensen - the sg list needs to be initialized before trying to pull the elements out of it. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Jes Sorensen <jes@sgi.com> Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/scsi/qla1280.c387
1 files changed, 166 insertions, 221 deletions
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3aeb68bcb7ac..146d540f6281 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1310,14 +1310,7 @@ qla1280_done(struct scsi_qla_host *ha)
1310 } 1310 }
1311 1311
1312 /* Release memory used for this I/O */ 1312 /* Release memory used for this I/O */
1313 if (cmd->use_sg) { 1313 scsi_dma_unmap(cmd);
1314 pci_unmap_sg(ha->pdev, cmd->request_buffer,
1315 cmd->use_sg, cmd->sc_data_direction);
1316 } else if (cmd->request_bufflen) {
1317 pci_unmap_single(ha->pdev, sp->saved_dma_handle,
1318 cmd->request_bufflen,
1319 cmd->sc_data_direction);
1320 }
1321 1314
1322 /* Call the mid-level driver interrupt handler */ 1315 /* Call the mid-level driver interrupt handler */
1323 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE; 1316 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
@@ -1406,14 +1399,14 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1406 break; 1399 break;
1407 1400
1408 case CS_DATA_UNDERRUN: 1401 case CS_DATA_UNDERRUN:
1409 if ((cp->request_bufflen - residual_length) < 1402 if ((scsi_bufflen(cp) - residual_length) <
1410 cp->underflow) { 1403 cp->underflow) {
1411 printk(KERN_WARNING 1404 printk(KERN_WARNING
1412 "scsi: Underflow detected - retrying " 1405 "scsi: Underflow detected - retrying "
1413 "command.\n"); 1406 "command.\n");
1414 host_status = DID_ERROR; 1407 host_status = DID_ERROR;
1415 } else { 1408 } else {
1416 cp->resid = residual_length; 1409 scsi_set_resid(cp, residual_length);
1417 host_status = DID_OK; 1410 host_status = DID_OK;
1418 } 1411 }
1419 break; 1412 break;
@@ -2775,33 +2768,28 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2775 struct device_reg __iomem *reg = ha->iobase; 2768 struct device_reg __iomem *reg = ha->iobase;
2776 struct scsi_cmnd *cmd = sp->cmd; 2769 struct scsi_cmnd *cmd = sp->cmd;
2777 cmd_a64_entry_t *pkt; 2770 cmd_a64_entry_t *pkt;
2778 struct scatterlist *sg = NULL, *s;
2779 __le32 *dword_ptr; 2771 __le32 *dword_ptr;
2780 dma_addr_t dma_handle; 2772 dma_addr_t dma_handle;
2781 int status = 0; 2773 int status = 0;
2782 int cnt; 2774 int cnt;
2783 int req_cnt; 2775 int req_cnt;
2784 u16 seg_cnt; 2776 int seg_cnt;
2785 u8 dir; 2777 u8 dir;
2786 2778
2787 ENTER("qla1280_64bit_start_scsi:"); 2779 ENTER("qla1280_64bit_start_scsi:");
2788 2780
2789 /* Calculate number of entries and segments required. */ 2781 /* Calculate number of entries and segments required. */
2790 req_cnt = 1; 2782 req_cnt = 1;
2791 if (cmd->use_sg) { 2783 seg_cnt = scsi_dma_map(cmd);
2792 sg = (struct scatterlist *) cmd->request_buffer; 2784 if (seg_cnt > 0) {
2793 seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg,
2794 cmd->sc_data_direction);
2795
2796 if (seg_cnt > 2) { 2785 if (seg_cnt > 2) {
2797 req_cnt += (seg_cnt - 2) / 5; 2786 req_cnt += (seg_cnt - 2) / 5;
2798 if ((seg_cnt - 2) % 5) 2787 if ((seg_cnt - 2) % 5)
2799 req_cnt++; 2788 req_cnt++;
2800 } 2789 }
2801 } else if (cmd->request_bufflen) { /* If data transfer. */ 2790 } else if (seg_cnt < 0) {
2802 seg_cnt = 1; 2791 status = 1;
2803 } else { 2792 goto out;
2804 seg_cnt = 0;
2805 } 2793 }
2806 2794
2807 if ((req_cnt + 2) >= ha->req_q_cnt) { 2795 if ((req_cnt + 2) >= ha->req_q_cnt) {
@@ -2889,124 +2877,104 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2889 * Load data segments. 2877 * Load data segments.
2890 */ 2878 */
2891 if (seg_cnt) { /* If data transfer. */ 2879 if (seg_cnt) { /* If data transfer. */
2880 struct scatterlist *sg, *s;
2892 int remseg = seg_cnt; 2881 int remseg = seg_cnt;
2882
2883 sg = scsi_sglist(cmd);
2884
2893 /* Setup packet address segment pointer. */ 2885 /* Setup packet address segment pointer. */
2894 dword_ptr = (u32 *)&pkt->dseg_0_address; 2886 dword_ptr = (u32 *)&pkt->dseg_0_address;
2895 2887
2896 if (cmd->use_sg) { /* If scatter gather */ 2888 /* Load command entry data segments. */
2897 /* Load command entry data segments. */ 2889 for_each_sg(sg, s, seg_cnt, cnt) {
2898 for_each_sg(sg, s, seg_cnt, cnt) { 2890 if (cnt == 2)
2899 if (cnt == 2) 2891 break;
2892
2893 dma_handle = sg_dma_address(s);
2894#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2895 if (ha->flags.use_pci_vchannel)
2896 sn_pci_set_vchan(ha->pdev,
2897 (unsigned long *)&dma_handle,
2898 SCSI_BUS_32(cmd));
2899#endif
2900 *dword_ptr++ =
2901 cpu_to_le32(pci_dma_lo32(dma_handle));
2902 *dword_ptr++ =
2903 cpu_to_le32(pci_dma_hi32(dma_handle));
2904 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2905 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2906 cpu_to_le32(pci_dma_hi32(dma_handle)),
2907 cpu_to_le32(pci_dma_lo32(dma_handle)),
2908 cpu_to_le32(sg_dma_len(sg_next(s))));
2909 remseg--;
2910 }
2911 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2912 "command packet data - b %i, t %i, l %i \n",
2913 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2914 SCSI_LUN_32(cmd));
2915 qla1280_dump_buffer(5, (char *)pkt,
2916 REQUEST_ENTRY_SIZE);
2917
2918 /*
2919 * Build continuation packets.
2920 */
2921 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2922 "remains\n", seg_cnt);
2923
2924 while (remseg > 0) {
2925 /* Update sg start */
2926 sg = s;
2927 /* Adjust ring index. */
2928 ha->req_ring_index++;
2929 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2930 ha->req_ring_index = 0;
2931 ha->request_ring_ptr =
2932 ha->request_ring;
2933 } else
2934 ha->request_ring_ptr++;
2935
2936 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2937
2938 /* Zero out packet. */
2939 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2940
2941 /* Load packet defaults. */
2942 ((struct cont_a64_entry *) pkt)->entry_type =
2943 CONTINUE_A64_TYPE;
2944 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2945 ((struct cont_a64_entry *) pkt)->sys_define =
2946 (uint8_t)ha->req_ring_index;
2947 /* Setup packet address segment pointer. */
2948 dword_ptr =
2949 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2950
2951 /* Load continuation entry data segments. */
2952 for_each_sg(sg, s, remseg, cnt) {
2953 if (cnt == 5)
2900 break; 2954 break;
2901 dma_handle = sg_dma_address(s); 2955 dma_handle = sg_dma_address(s);
2902#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2956#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2903 if (ha->flags.use_pci_vchannel) 2957 if (ha->flags.use_pci_vchannel)
2904 sn_pci_set_vchan(ha->pdev, 2958 sn_pci_set_vchan(ha->pdev,
2905 (unsigned long *)&dma_handle, 2959 (unsigned long *)&dma_handle,
2906 SCSI_BUS_32(cmd)); 2960 SCSI_BUS_32(cmd));
2907#endif 2961#endif
2908 *dword_ptr++ = 2962 *dword_ptr++ =
2909 cpu_to_le32(pci_dma_lo32(dma_handle)); 2963 cpu_to_le32(pci_dma_lo32(dma_handle));
2910 *dword_ptr++ = 2964 *dword_ptr++ =
2911 cpu_to_le32(pci_dma_hi32(dma_handle)); 2965 cpu_to_le32(pci_dma_hi32(dma_handle));
2912 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 2966 *dword_ptr++ =
2913 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 2967 cpu_to_le32(sg_dma_len(s));
2968 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2914 cpu_to_le32(pci_dma_hi32(dma_handle)), 2969 cpu_to_le32(pci_dma_hi32(dma_handle)),
2915 cpu_to_le32(pci_dma_lo32(dma_handle)), 2970 cpu_to_le32(pci_dma_lo32(dma_handle)),
2916 cpu_to_le32(sg_dma_len(sg_next(s)))); 2971 cpu_to_le32(sg_dma_len(s)));
2917 remseg--;
2918 } 2972 }
2919 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " 2973 remseg -= cnt;
2920 "command packet data - b %i, t %i, l %i \n", 2974 dprintk(5, "qla1280_64bit_start_scsi: "
2921 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 2975 "continuation packet data - b %i, t "
2922 SCSI_LUN_32(cmd)); 2976 "%i, l %i \n", SCSI_BUS_32(cmd),
2923 qla1280_dump_buffer(5, (char *)pkt, 2977 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2924 REQUEST_ENTRY_SIZE);
2925
2926 /*
2927 * Build continuation packets.
2928 */
2929 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2930 "remains\n", seg_cnt);
2931
2932 while (remseg > 0) {
2933 /* Update sg start */
2934 sg = s;
2935 /* Adjust ring index. */
2936 ha->req_ring_index++;
2937 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2938 ha->req_ring_index = 0;
2939 ha->request_ring_ptr =
2940 ha->request_ring;
2941 } else
2942 ha->request_ring_ptr++;
2943
2944 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2945
2946 /* Zero out packet. */
2947 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2948
2949 /* Load packet defaults. */
2950 ((struct cont_a64_entry *) pkt)->entry_type =
2951 CONTINUE_A64_TYPE;
2952 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2953 ((struct cont_a64_entry *) pkt)->sys_define =
2954 (uint8_t)ha->req_ring_index;
2955 /* Setup packet address segment pointer. */
2956 dword_ptr =
2957 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2958
2959 /* Load continuation entry data segments. */
2960 for_each_sg(sg, s, remseg, cnt) {
2961 if (cnt == 5)
2962 break;
2963 dma_handle = sg_dma_address(s);
2964#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2965 if (ha->flags.use_pci_vchannel)
2966 sn_pci_set_vchan(ha->pdev,
2967 (unsigned long *)&dma_handle,
2968 SCSI_BUS_32(cmd));
2969#endif
2970 *dword_ptr++ =
2971 cpu_to_le32(pci_dma_lo32(dma_handle));
2972 *dword_ptr++ =
2973 cpu_to_le32(pci_dma_hi32(dma_handle));
2974 *dword_ptr++ =
2975 cpu_to_le32(sg_dma_len(s));
2976 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2977 cpu_to_le32(pci_dma_hi32(dma_handle)),
2978 cpu_to_le32(pci_dma_lo32(dma_handle)),
2979 cpu_to_le32(sg_dma_len(s)));
2980 }
2981 remseg -= cnt;
2982 dprintk(5, "qla1280_64bit_start_scsi: "
2983 "continuation packet data - b %i, t "
2984 "%i, l %i \n", SCSI_BUS_32(cmd),
2985 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2986 qla1280_dump_buffer(5, (char *)pkt,
2987 REQUEST_ENTRY_SIZE);
2988 }
2989 } else { /* No scatter gather data transfer */
2990 dma_handle = pci_map_single(ha->pdev,
2991 cmd->request_buffer,
2992 cmd->request_bufflen,
2993 cmd->sc_data_direction);
2994
2995 sp->saved_dma_handle = dma_handle;
2996#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2997 if (ha->flags.use_pci_vchannel)
2998 sn_pci_set_vchan(ha->pdev,
2999 (unsigned long *)&dma_handle,
3000 SCSI_BUS_32(cmd));
3001#endif
3002 *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
3003 *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle));
3004 *dword_ptr = cpu_to_le32(cmd->request_bufflen);
3005
3006 dprintk(5, "qla1280_64bit_start_scsi: No scatter/"
3007 "gather command packet data - b %i, t %i, "
3008 "l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
3009 SCSI_LUN_32(cmd));
3010 qla1280_dump_buffer(5, (char *)pkt, 2978 qla1280_dump_buffer(5, (char *)pkt,
3011 REQUEST_ENTRY_SIZE); 2979 REQUEST_ENTRY_SIZE);
3012 } 2980 }
@@ -3068,12 +3036,11 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3068 struct device_reg __iomem *reg = ha->iobase; 3036 struct device_reg __iomem *reg = ha->iobase;
3069 struct scsi_cmnd *cmd = sp->cmd; 3037 struct scsi_cmnd *cmd = sp->cmd;
3070 struct cmd_entry *pkt; 3038 struct cmd_entry *pkt;
3071 struct scatterlist *sg = NULL, *s;
3072 __le32 *dword_ptr; 3039 __le32 *dword_ptr;
3073 int status = 0; 3040 int status = 0;
3074 int cnt; 3041 int cnt;
3075 int req_cnt; 3042 int req_cnt;
3076 uint16_t seg_cnt; 3043 int seg_cnt;
3077 dma_addr_t dma_handle; 3044 dma_addr_t dma_handle;
3078 u8 dir; 3045 u8 dir;
3079 3046
@@ -3083,18 +3050,8 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3083 cmd->cmnd[0]); 3050 cmd->cmnd[0]);
3084 3051
3085 /* Calculate number of entries and segments required. */ 3052 /* Calculate number of entries and segments required. */
3086 req_cnt = 1; 3053 seg_cnt = scsi_dma_map(cmd);
3087 if (cmd->use_sg) { 3054 if (seg_cnt) {
3088 /*
3089 * We must build an SG list in adapter format, as the kernel's
3090 * SG list cannot be used directly because of data field size
3091 * (__alpha__) differences and the kernel SG list uses virtual
3092 * addresses where we need physical addresses.
3093 */
3094 sg = (struct scatterlist *) cmd->request_buffer;
3095 seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg,
3096 cmd->sc_data_direction);
3097
3098 /* 3055 /*
3099 * if greater than four sg entries then we need to allocate 3056 * if greater than four sg entries then we need to allocate
3100 * continuation entries 3057 * continuation entries
@@ -3106,14 +3063,9 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3106 } 3063 }
3107 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n", 3064 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3108 cmd, seg_cnt, req_cnt); 3065 cmd, seg_cnt, req_cnt);
3109 } else if (cmd->request_bufflen) { /* If data transfer. */ 3066 } else if (seg_cnt < 0) {
3110 dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n", 3067 status = 1;
3111 SCSI_TCN_32(cmd), cmd, cmd->request_bufflen, 3068 goto out;
3112 cmd->cmnd[0]);
3113 seg_cnt = 1;
3114 } else {
3115 /* dprintk(1, "No data transfer \n"); */
3116 seg_cnt = 0;
3117 } 3069 }
3118 3070
3119 if ((req_cnt + 2) >= ha->req_q_cnt) { 3071 if ((req_cnt + 2) >= ha->req_q_cnt) {
@@ -3194,91 +3146,84 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3194 * Load data segments. 3146 * Load data segments.
3195 */ 3147 */
3196 if (seg_cnt) { 3148 if (seg_cnt) {
3149 struct scatterlist *sg, *s;
3197 int remseg = seg_cnt; 3150 int remseg = seg_cnt;
3151
3152 sg = scsi_sglist(cmd);
3153
3198 /* Setup packet address segment pointer. */ 3154 /* Setup packet address segment pointer. */
3199 dword_ptr = &pkt->dseg_0_address; 3155 dword_ptr = &pkt->dseg_0_address;
3200 3156
3201 if (cmd->use_sg) { /* If scatter gather */ 3157 dprintk(3, "Building S/G data segments..\n");
3202 dprintk(3, "Building S/G data segments..\n"); 3158 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3203 qla1280_dump_buffer(1, (char *)sg, 4 * 16); 3159
3160 /* Load command entry data segments. */
3161 for_each_sg(sg, s, seg_cnt, cnt) {
3162 if (cnt == 4)
3163 break;
3164 *dword_ptr++ =
3165 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3166 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3167 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3168 (pci_dma_lo32(sg_dma_address(s))),
3169 (sg_dma_len(s)));
3170 remseg--;
3171 }
3172 /*
3173 * Build continuation packets.
3174 */
3175 dprintk(3, "S/G Building Continuation"
3176 "...seg_cnt=0x%x remains\n", seg_cnt);
3177 while (remseg > 0) {
3178 /* Continue from end point */
3179 sg = s;
3180 /* Adjust ring index. */
3181 ha->req_ring_index++;
3182 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3183 ha->req_ring_index = 0;
3184 ha->request_ring_ptr =
3185 ha->request_ring;
3186 } else
3187 ha->request_ring_ptr++;
3188
3189 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3190
3191 /* Zero out packet. */
3192 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3193
3194 /* Load packet defaults. */
3195 ((struct cont_entry *) pkt)->
3196 entry_type = CONTINUE_TYPE;
3197 ((struct cont_entry *) pkt)->entry_count = 1;
3204 3198
3205 /* Load command entry data segments. */ 3199 ((struct cont_entry *) pkt)->sys_define =
3206 for_each_sg(sg, s, seg_cnt, cnt) { 3200 (uint8_t) ha->req_ring_index;
3207 if (cnt == 4) 3201
3202 /* Setup packet address segment pointer. */
3203 dword_ptr =
3204 &((struct cont_entry *) pkt)->dseg_0_address;
3205
3206 /* Load continuation entry data segments. */
3207 for_each_sg(sg, s, remseg, cnt) {
3208 if (cnt == 7)
3208 break; 3209 break;
3209 *dword_ptr++ = 3210 *dword_ptr++ =
3210 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3211 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3211 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 3212 *dword_ptr++ =
3212 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3213 cpu_to_le32(sg_dma_len(s));
3213 (pci_dma_lo32(sg_dma_address(s))), 3214 dprintk(1,
3214 (sg_dma_len(s))); 3215 "S/G Segment Cont. phys_addr=0x%x, "
3215 remseg--; 3216 "len=0x%x\n",
3216 } 3217 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3217 /* 3218 cpu_to_le32(sg_dma_len(s)));
3218 * Build continuation packets.
3219 */
3220 dprintk(3, "S/G Building Continuation"
3221 "...seg_cnt=0x%x remains\n", seg_cnt);
3222 while (remseg > 0) {
3223 /* Continue from end point */
3224 sg = s;
3225 /* Adjust ring index. */
3226 ha->req_ring_index++;
3227 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3228 ha->req_ring_index = 0;
3229 ha->request_ring_ptr =
3230 ha->request_ring;
3231 } else
3232 ha->request_ring_ptr++;
3233
3234 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3235
3236 /* Zero out packet. */
3237 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3238
3239 /* Load packet defaults. */
3240 ((struct cont_entry *) pkt)->
3241 entry_type = CONTINUE_TYPE;
3242 ((struct cont_entry *) pkt)->entry_count = 1;
3243
3244 ((struct cont_entry *) pkt)->sys_define =
3245 (uint8_t) ha->req_ring_index;
3246
3247 /* Setup packet address segment pointer. */
3248 dword_ptr =
3249 &((struct cont_entry *) pkt)->dseg_0_address;
3250
3251 /* Load continuation entry data segments. */
3252 for_each_sg(sg, s, remseg, cnt) {
3253 if (cnt == 7)
3254 break;
3255 *dword_ptr++ =
3256 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3257 *dword_ptr++ =
3258 cpu_to_le32(sg_dma_len(s));
3259 dprintk(1,
3260 "S/G Segment Cont. phys_addr=0x%x, "
3261 "len=0x%x\n",
3262 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3263 cpu_to_le32(sg_dma_len(s)));
3264 }
3265 remseg -= cnt;
3266 dprintk(5, "qla1280_32bit_start_scsi: "
3267 "continuation packet data - "
3268 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3269 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3270 qla1280_dump_buffer(5, (char *)pkt,
3271 REQUEST_ENTRY_SIZE);
3272 } 3219 }
3273 } else { /* No S/G data transfer */ 3220 remseg -= cnt;
3274 dma_handle = pci_map_single(ha->pdev, 3221 dprintk(5, "qla1280_32bit_start_scsi: "
3275 cmd->request_buffer, 3222 "continuation packet data - "
3276 cmd->request_bufflen, 3223 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3277 cmd->sc_data_direction); 3224 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3278 sp->saved_dma_handle = dma_handle; 3225 qla1280_dump_buffer(5, (char *)pkt,
3279 3226 REQUEST_ENTRY_SIZE);
3280 *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
3281 *dword_ptr = cpu_to_le32(cmd->request_bufflen);
3282 } 3227 }
3283 } else { /* No data transfer at all */ 3228 } else { /* No data transfer at all */
3284 dprintk(5, "qla1280_32bit_start_scsi: No data, command " 3229 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
@@ -4086,9 +4031,9 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4086 for (i = 0; i < cmd->cmd_len; i++) { 4031 for (i = 0; i < cmd->cmd_len; i++) {
4087 printk("0x%02x ", cmd->cmnd[i]); 4032 printk("0x%02x ", cmd->cmnd[i]);
4088 } 4033 }
4089 printk(" seg_cnt =%d\n", cmd->use_sg); 4034 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4090 printk(" request buffer=0x%p, request buffer len=0x%x\n", 4035 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4091 cmd->request_buffer, cmd->request_bufflen); 4036 scsi_sglist(cmd), scsi_bufflen(cmd));
4092 /* if (cmd->use_sg) 4037 /* if (cmd->use_sg)
4093 { 4038 {
4094 sg = (struct scatterlist *) cmd->request_buffer; 4039 sg = (struct scatterlist *) cmd->request_buffer;