author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2007-05-25 12:55:38 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-05-31 13:23:49 -0400
commit		385d70b4e2659ae525a00e46a9f97146949cfc14 (patch)
tree		35f34aefd41261f6bc2f4c31d308e1d0a75c4be0 /drivers
parent		b1192d5ebab2f1664295a748b6ee6e89f3b07188 (diff)
[SCSI] qla2xxx: convert to use the data buffer accessors
- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the parameters.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.c	   4
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	 232
-rw-r--r--	drivers/scsi/qla2xxx/qla_isr.c	  44
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c	   8
4 files changed, 107 insertions, 181 deletions
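
For readers unfamiliar with the accessor API, the sketch below is illustrative only and not part of this patch (example_queue_io() is a hypothetical helper). It shows the pattern the driver is converted to: scsi_dma_map() maps the command's sg table, scsi_for_each_sg() walks the mapped entries, and a single scsi_dma_unmap() releases the mapping, replacing the old use_sg/request_buffer/pci_map_single split.

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int example_queue_io(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	/* Map the whole sg table: 0 means no data, < 0 means mapping failed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return -EIO;

	/* Walk the mapped entries instead of testing cmd->use_sg. */
	scsi_for_each_sg(cmd, sg, nseg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* Hand addr/len to the HBA's DSD/IOCB here (placeholder). */
		(void)addr;
		(void)len;
	}

	/* One call undoes the mapping on completion or error. */
	scsi_dma_unmap(cmd);
	return 0;
}

The same family of accessors provides scsi_sg_count(), scsi_sglist(), scsi_bufflen() and scsi_set_resid(), which the patch uses in place of direct cmd->use_sg, cmd->request_buffer, cmd->request_bufflen and cmd->resid accesses.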
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6ed6962bc2b..996c47a63074 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1411,9 +1411,9 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
 		printk("0x%02x ", cmd->cmnd[i]);
 	}
 	printk("\n seg_cnt=%d, allowed=%d, retries=%d\n",
-	    cmd->use_sg, cmd->allowed, cmd->retries);
+	    scsi_sg_count(cmd), cmd->allowed, cmd->retries);
 	printk(" request buffer=0x%p, request buffer len=0x%x\n",
-	    cmd->request_buffer, cmd->request_bufflen);
+	    scsi_sglist(cmd), scsi_bufflen(cmd));
 	printk(" tag=%d, transfersize=0x%x\n",
 	    cmd->tag, cmd->transfersize);
 	printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c610a32a..c517a1478e44 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -155,6 +155,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -163,7 +165,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_TYPE);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -177,35 +179,24 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			cont_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Seven DSDs are available in the Continuation
-				 * Type 0 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
-				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
-				avail_dsds = 7;
-			}
-
-			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
-		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		cont_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Seven DSDs are available in the Continuation
+			 * Type 0 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+			avail_dsds = 7;
+		}
+
+		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -224,6 +215,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -232,7 +225,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -246,39 +239,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t	sle_dma;
-			cont_a64_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Five DSDs are available in the Continuation
-				 * Type 1 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
-				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-				avail_dsds = 5;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
-		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -291,7 +271,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 int
 qla2x00_start_scsi(srb_t *sp)
 {
-	int		ret;
+	int		ret, nseg;
 	unsigned long	flags;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
@@ -299,7 +279,6 @@ qla2x00_start_scsi(srb_t *sp)
 	uint32_t	index;
 	uint32_t	handle;
 	cmd_entry_t	*cmd_pkt;
-	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
@@ -337,23 +316,10 @@ qla2x00_start_scsi(srb_t *sp)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t	req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
-			goto queuing_error;
-
-		sp->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		goto queuing_error;
+	tot_dsds = nseg;
 
 	/* Calculate the number of request entries needed. */
 	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
@@ -391,7 +357,7 @@ qla2x00_start_scsi(srb_t *sp)
 
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
-	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
 	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
@@ -423,14 +389,9 @@ qla2x00_start_scsi(srb_t *sp)
 	return (QLA_SUCCESS);
 
 queuing_error:
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-	} else if (tot_dsds) {
-		pci_unmap_single(ha->pdev, sp->dma_handle,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-	}
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return (QLA_FUNCTION_FAILED);
@@ -642,6 +603,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -650,7 +613,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_TYPE_7);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -670,39 +633,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t	sle_dma;
-			cont_a64_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Five DSDs are available in the Continuation
-				 * Type 1 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
-				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-				avail_dsds = 5;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
-		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -716,7 +667,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 int
 qla24xx_start_scsi(srb_t *sp)
 {
-	int		ret;
+	int		ret, nseg;
 	unsigned long	flags;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
@@ -724,7 +675,6 @@ qla24xx_start_scsi(srb_t *sp)
 	uint32_t	index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
-	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
@@ -762,23 +712,10 @@ qla24xx_start_scsi(srb_t *sp)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
 		goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
-			goto queuing_error;
-
-		sp->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	tot_dsds = nseg;
 
 	req_cnt = qla24xx_calc_iocbs(tot_dsds);
 	if (ha->req_q_cnt < (req_cnt + 2)) {
@@ -821,7 +758,7 @@ qla24xx_start_scsi(srb_t *sp)
 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
 
-	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
@@ -853,14 +790,9 @@ qla24xx_start_scsi(srb_t *sp)
 	return QLA_SUCCESS;
 
 queuing_error:
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-	} else if (tot_dsds) {
-		pci_unmap_single(ha->pdev, sp->dma_handle,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-	}
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0f04258becbf..6ce532cdc4c1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -889,19 +889,19 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			}
 			if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
 				resid = resid_len;
-				cp->resid = resid;
+				scsi_set_resid(cp, resid);
 				CMD_RESID_LEN(cp) = resid;
 
 				if (!lscsi_status &&
-				    ((unsigned)(cp->request_bufflen - resid) <
+				    ((unsigned)(scsi_bufflen(cp) - resid) <
 				     cp->underflow)) {
 					qla_printk(KERN_INFO, ha,
 					    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
 					    "detected (%x of %x bytes)...returning "
 					    "error status.\n", ha->host_no,
 					    cp->device->channel, cp->device->id,
 					    cp->device->lun, resid,
-					    cp->request_bufflen);
+					    scsi_bufflen(cp));
 
 					cp->result = DID_ERROR << 16;
 					break;
@@ -963,7 +963,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			resid = fw_resid_len;
 
 			if (scsi_status & SS_RESIDUAL_UNDER) {
-				cp->resid = resid;
+				scsi_set_resid(cp, resid);
 				CMD_RESID_LEN(cp) = resid;
 			} else {
 				DEBUG2(printk(KERN_INFO
@@ -1042,26 +1042,26 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			 */
 			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
 				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
 				    "frame(s) detected (%x of %x bytes)..."
 				    "retrying command.\n", ha->host_no,
 				    cp->device->channel, cp->device->id,
 				    cp->device->lun, resid,
-				    cp->request_bufflen));
+				    scsi_bufflen(cp)));
 
 				cp->result = DID_BUS_BUSY << 16;
 				break;
 			}
 
 			/* Handle mid-layer underflow */
-			if ((unsigned)(cp->request_bufflen - resid) <
+			if ((unsigned)(scsi_bufflen(cp) - resid) <
 			    cp->underflow) {
 				qla_printk(KERN_INFO, ha,
 				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
 				    "detected (%x of %x bytes)...returning "
 				    "error status.\n", ha->host_no,
 				    cp->device->channel, cp->device->id,
 				    cp->device->lun, resid,
-				    cp->request_bufflen);
+				    scsi_bufflen(cp));
 
 				cp->result = DID_ERROR << 16;
 				break;
@@ -1084,7 +1084,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		DEBUG2(printk(KERN_INFO
 		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
 		    "status!\n",
-		    cp->serial_number, cp->request_bufflen, resid_len));
+		    cp->serial_number, scsi_bufflen(cp), resid_len));
 
 		cp->result = DID_ERROR << 16;
 		break;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dd076da86a46..18baa5bf69c4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2426,13 +2426,7 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
 	struct scsi_cmnd *cmd = sp->cmd;
 
 	if (sp->flags & SRB_DMA_VALID) {
-		if (cmd->use_sg) {
-			dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
-			    cmd->use_sg, cmd->sc_data_direction);
-		} else if (cmd->request_bufflen) {
-			dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
-			    cmd->request_bufflen, cmd->sc_data_direction);
-		}
+		scsi_dma_unmap(cmd);
 		sp->flags &= ~SRB_DMA_VALID;
 	}
 	CMD_SP(cmd) = NULL;