author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2007-05-25 12:55:38 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-05-31 13:23:49 -0400
commit		385d70b4e2659ae525a00e46a9f97146949cfc14
tree		35f34aefd41261f6bc2f4c31d308e1d0a75c4be0	/drivers/scsi/qla2xxx/qla_iocb.c
parent		b1192d5ebab2f1664295a748b6ee6e89f3b07188
[SCSI] qla2xxx: convert to use the data buffer accessors
- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the parameters.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	232
1 file changed, 82 insertions(+), 150 deletions(-)
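
For context, the diff below follows the generic SCSI data buffer accessor pattern. The sketch that follows is not taken from qla2xxx; it is a minimal illustration, assuming a hypothetical helper in a driver's I/O submission path, of how scsi_dma_map()/scsi_for_each_sg()/scsi_bufflen()/scsi_dma_unmap() replace the old use_sg/request_buffer/request_bufflen fields and the separate map_single path:

#include <linux/types.h>
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

/*
 * Minimal sketch (hypothetical helper, not part of qla2xxx): walk the
 * command's DMA-mapped scatter/gather list using only the data buffer
 * accessors.
 */
static int example_build_segments(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	/*
	 * scsi_dma_map() maps the command's sg list and returns 0 for a
	 * command with no data, the number of mapped entries, or a
	 * negative value if the mapping failed.
	 */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return -EIO;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		/* A real driver would program addr/len into a hardware
		 * descriptor here; this sketch only touches the values. */
		(void)addr;
		(void)len;
	}

	/*
	 * The total transfer length comes from scsi_bufflen(cmd), which
	 * replaces the old cmd->request_bufflen field.  A real driver
	 * releases the mapping with scsi_dma_unmap(cmd) on completion or
	 * on an error path; it is called here only so the sketch pairs
	 * the map with its unmap.
	 */
	scsi_dma_unmap(cmd);
	return 0;
}

Because scsi_dma_map() handles the whole data buffer through the sg list, the separate pci_map_single() branch and the per-command dma_handle become unnecessary, which is what the commit message refers to as removing the map_single path.
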
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c610a32a..c517a1478e44 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -155,6 +155,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -163,7 +165,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_TYPE);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -177,35 +179,24 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			cont_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Seven DSDs are available in the Continuation
-				 * Type 0 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
-				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
-				avail_dsds = 7;
-			}
-
-			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
-		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		cont_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Seven DSDs are available in the Continuation
+			 * Type 0 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+			avail_dsds = 7;
+		}
+
+		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -224,6 +215,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -232,7 +225,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -246,39 +239,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t sle_dma;
-			cont_a64_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Five DSDs are available in the Continuation
-				 * Type 1 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
-				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-				avail_dsds = 5;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
-		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -291,7 +271,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 int
 qla2x00_start_scsi(srb_t *sp)
 {
-	int		ret;
+	int		ret, nseg;
 	unsigned long	flags;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
@@ -299,7 +279,6 @@ qla2x00_start_scsi(srb_t *sp)
 	uint32_t	index;
 	uint32_t	handle;
 	cmd_entry_t	*cmd_pkt;
-	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
@@ -337,23 +316,10 @@ qla2x00_start_scsi(srb_t *sp)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t	req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
-			goto queuing_error;
-
-		sp->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		goto queuing_error;
+	tot_dsds = nseg;
 
 	/* Calculate the number of request entries needed. */
 	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
@@ -391,7 +357,7 @@ qla2x00_start_scsi(srb_t *sp)
 
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
-	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
 	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
@@ -423,14 +389,9 @@ qla2x00_start_scsi(srb_t *sp)
 	return (QLA_SUCCESS);
 
 queuing_error:
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-	} else if (tot_dsds) {
-		pci_unmap_single(ha->pdev, sp->dma_handle,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-	}
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return (QLA_FUNCTION_FAILED);
@@ -642,6 +603,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -650,7 +613,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_TYPE_7);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -670,39 +633,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t sle_dma;
-			cont_a64_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Five DSDs are available in the Continuation
-				 * Type 1 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
-				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-				avail_dsds = 5;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
-		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -716,7 +667,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 int
 qla24xx_start_scsi(srb_t *sp)
 {
-	int		ret;
+	int		ret, nseg;
 	unsigned long	flags;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
@@ -724,7 +675,6 @@ qla24xx_start_scsi(srb_t *sp)
 	uint32_t	index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
-	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
@@ -762,23 +712,10 @@ qla24xx_start_scsi(srb_t *sp)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t	req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
-			goto queuing_error;
-
-		sp->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		goto queuing_error;
+	tot_dsds = nseg;
 
 	req_cnt = qla24xx_calc_iocbs(tot_dsds);
 	if (ha->req_q_cnt < (req_cnt + 2)) {
@@ -821,7 +758,7 @@ qla24xx_start_scsi(srb_t *sp)
 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
 
-	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
@@ -853,14 +790,9 @@ qla24xx_start_scsi(srb_t *sp)
 	return QLA_SUCCESS;
 
 queuing_error:
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-	} else if (tot_dsds) {
-		pci_unmap_single(ha->pdev, sp->dma_handle,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-	}
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_FUNCTION_FAILED;