aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2007-06-17 20:56:39 -0400
committerJames Bottomley <jejb@mulgrave.il.steeleye.com>2007-06-17 23:40:31 -0400
commita0b4f78f9a4c869e9b29f254054ad7441cb40bbf (patch)
tree272c5233cbf1601317cecb002fb34bddf0d0ab59 /drivers/scsi
parent858c9f6c19c6f9bf86cbbc64ce0d17c61d6131b8 (diff)
[SCSI] lpfc: convert to use the data buffer accessors
This patch is a reworked version of the data buffer accessors patch so that it applies on the NPIV sources.

The original patch was developed and submitted by Fujita Tomonori:
FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
http://marc.info/?l=linux-scsi&m=117896446832171&w=2

- remove the unnecessary map_single path.
- convert to use the new accessors for the sg lists and the parameters.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c111
1 file changed, 28 insertions(+), 83 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2de4c4e1cd80..5d2e3de7de96 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -321,13 +321,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
321 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 321 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
322 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 322 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
323 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 323 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
324 uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
325 ? lpfc_cmd->cur_iocbq.vport->vpi
326 : 0);
327 dma_addr_t physaddr; 324 dma_addr_t physaddr;
328 uint32_t i, num_bde = 0; 325 uint32_t i, num_bde = 0;
329 int datadir = scsi_cmnd->sc_data_direction; 326 int nseg, datadir = scsi_cmnd->sc_data_direction;
330 int dma_error;
331 327
332 /* 328 /*
333 * There are three possibilities here - use scatter-gather segment, use 329 * There are three possibilities here - use scatter-gather segment, use
@@ -336,26 +332,22 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
336 * data bde entry. 332 * data bde entry.
337 */ 333 */
338 bpl += 2; 334 bpl += 2;
339 if (scsi_cmnd->use_sg) { 335 nseg = scsi_dma_map(scsi_cmnd);
336 if (nseg > 0) {
340 /* 337 /*
341 * The driver stores the segment count returned from pci_map_sg 338 * The driver stores the segment count returned from pci_map_sg
342 * because this a count of dma-mappings used to map the use_sg 339 * because this a count of dma-mappings used to map the use_sg
343 * pages. They are not guaranteed to be the same for those 340 * pages. They are not guaranteed to be the same for those
344 * architectures that implement an IOMMU. 341 * architectures that implement an IOMMU.
345 */ 342 */
346 sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
347 lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
348 scsi_cmnd->use_sg, datadir);
349 if (lpfc_cmd->seg_cnt == 0)
350 return 1;
351 343
344 lpfc_cmd->seg_cnt = nseg;
352 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 345 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
353 printk(KERN_ERR "%s: Too many sg segments from " 346 printk(KERN_ERR "%s: Too many sg segments from "
354 "dma_map_sg. Config %d, seg_cnt %d", 347 "dma_map_sg. Config %d, seg_cnt %d",
355 __FUNCTION__, phba->cfg_sg_seg_cnt, 348 __FUNCTION__, phba->cfg_sg_seg_cnt,
356 lpfc_cmd->seg_cnt); 349 lpfc_cmd->seg_cnt);
357 dma_unmap_sg(&phba->pcidev->dev, sgel, 350 scsi_dma_unmap(scsi_cmnd);
358 lpfc_cmd->seg_cnt, datadir);
359 return 1; 351 return 1;
360 } 352 }
361 353
@@ -365,7 +357,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
365 * single scsi command. Just run through the seg_cnt and format 357 * single scsi command. Just run through the seg_cnt and format
366 * the bde's. 358 * the bde's.
367 */ 359 */
368 for (i = 0; i < lpfc_cmd->seg_cnt; i++) { 360 scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
369 physaddr = sg_dma_address(sgel); 361 physaddr = sg_dma_address(sgel);
370 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 362 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
371 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 363 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -376,35 +368,10 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
376 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 368 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
377 bpl->tus.w = le32_to_cpu(bpl->tus.w); 369 bpl->tus.w = le32_to_cpu(bpl->tus.w);
378 bpl++; 370 bpl++;
379 sgel++;
380 num_bde++; 371 num_bde++;
381 } 372 }
382 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { 373 } else if (nseg < 0)
383 physaddr = dma_map_single(&phba->pcidev->dev, 374 return 1;
384 scsi_cmnd->request_buffer,
385 scsi_cmnd->request_bufflen,
386 datadir);
387 dma_error = dma_mapping_error(physaddr);
388 if (dma_error) {
389 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
390 "%d (%d):0718 Unable to dma_map_single "
391 "request_buffer: x%x\n",
392 phba->brd_no, vpi, dma_error);
393 return 1;
394 }
395
396 lpfc_cmd->nonsg_phys = physaddr;
397 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
398 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
399 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
400 if (datadir == DMA_TO_DEVICE)
401 bpl->tus.f.bdeFlags = 0;
402 else
403 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
404 bpl->tus.w = le32_to_cpu(bpl->tus.w);
405 num_bde = 1;
406 bpl++;
407 }
408 375
409 /* 376 /*
410 * Finish initializing those IOCB fields that are dependent on the 377 * Finish initializing those IOCB fields that are dependent on the
@@ -417,7 +384,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
417 (num_bde * sizeof (struct ulp_bde64)); 384 (num_bde * sizeof (struct ulp_bde64));
418 iocb_cmd->ulpBdeCount = 1; 385 iocb_cmd->ulpBdeCount = 1;
419 iocb_cmd->ulpLe = 1; 386 iocb_cmd->ulpLe = 1;
420 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); 387 fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
421 return 0; 388 return 0;
422} 389}
423 390
@@ -430,16 +397,8 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
430 * a request buffer, but did not request use_sg. There is a third 397 * a request buffer, but did not request use_sg. There is a third
431 * case, but it does not require resource deallocation. 398 * case, but it does not require resource deallocation.
432 */ 399 */
433 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { 400 if (psb->seg_cnt > 0)
434 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, 401 scsi_dma_unmap(psb->pCmd);
435 psb->seg_cnt, psb->pCmd->sc_data_direction);
436 } else {
437 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
438 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
439 psb->pCmd->request_bufflen,
440 psb->pCmd->sc_data_direction);
441 }
442 }
443} 402}
444 403
445static void 404static void
@@ -502,15 +461,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
502 } 461 }
503 } 462 }
504 463
505 cmnd->resid = 0; 464 scsi_set_resid(cmnd, 0);
506 if (resp_info & RESID_UNDER) { 465 if (resp_info & RESID_UNDER) {
507 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 466 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
508 467
509 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 468 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
510 "%d (%d):0716 FCP Read Underrun, expected %d, " 469 "%d (%d):0716 FCP Read Underrun, expected %d, "
511 "residual %d Data: x%x x%x x%x\n", 470 "residual %d Data: x%x x%x x%x\n",
512 phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl), 471 phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
513 cmnd->resid, fcpi_parm, cmnd->cmnd[0], 472 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
514 cmnd->underflow); 473 cmnd->underflow);
515 474
516 /* 475 /*
@@ -520,15 +479,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
520 */ 479 */
521 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 480 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
522 fcpi_parm && 481 fcpi_parm &&
523 (cmnd->resid != fcpi_parm)) { 482 (scsi_get_resid(cmnd) != fcpi_parm)) {
524 lpfc_printf_log(phba, KERN_WARNING, 483 lpfc_printf_log(phba, KERN_WARNING,
525 LOG_FCP | LOG_FCP_ERROR, 484 LOG_FCP | LOG_FCP_ERROR,
526 "%d (%d):0735 FCP Read Check Error " 485 "%d (%d):0735 FCP Read Check Error "
527 "and Underrun Data: x%x x%x x%x x%x\n", 486 "and Underrun Data: x%x x%x x%x x%x\n",
528 phba->brd_no, vpi, 487 phba->brd_no, vpi,
529 be32_to_cpu(fcpcmd->fcpDl), 488 be32_to_cpu(fcpcmd->fcpDl),
530 cmnd->resid, fcpi_parm, cmnd->cmnd[0]); 489 scsi_get_resid(cmnd), fcpi_parm,
531 cmnd->resid = cmnd->request_bufflen; 490 cmnd->cmnd[0]);
491 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
532 host_status = DID_ERROR; 492 host_status = DID_ERROR;
533 } 493 }
534 /* 494 /*
@@ -539,15 +499,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
539 */ 499 */
540 if (!(resp_info & SNS_LEN_VALID) && 500 if (!(resp_info & SNS_LEN_VALID) &&
541 (scsi_status == SAM_STAT_GOOD) && 501 (scsi_status == SAM_STAT_GOOD) &&
542 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { 502 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
503 < cmnd->underflow)) {
543 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 504 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
544 "%d (%d):0717 FCP command x%x residual " 505 "%d (%d):0717 FCP command x%x residual "
545 "underrun converted to error " 506 "underrun converted to error "
546 "Data: x%x x%x x%x\n", 507 "Data: x%x x%x x%x\n",
547 phba->brd_no, vpi, cmnd->cmnd[0], 508 phba->brd_no, vpi, cmnd->cmnd[0],
548 cmnd->request_bufflen, cmnd->resid, 509 cmnd->request_bufflen,
549 cmnd->underflow); 510 scsi_get_resid(cmnd), cmnd->underflow);
550
551 host_status = DID_ERROR; 511 host_status = DID_ERROR;
552 } 512 }
553 } else if (resp_info & RESID_OVER) { 513 } else if (resp_info & RESID_OVER) {
@@ -555,7 +515,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
555 "%d (%d):0720 FCP command x%x residual " 515 "%d (%d):0720 FCP command x%x residual "
556 "overrun error. Data: x%x x%x \n", 516 "overrun error. Data: x%x x%x \n",
557 phba->brd_no, vpi, cmnd->cmnd[0], 517 phba->brd_no, vpi, cmnd->cmnd[0],
558 cmnd->request_bufflen, cmnd->resid); 518 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
559 host_status = DID_ERROR; 519 host_status = DID_ERROR;
560 520
561 /* 521 /*
@@ -572,7 +532,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
572 be32_to_cpu(fcprsp->rspResId), 532 be32_to_cpu(fcprsp->rspResId),
573 fcpi_parm, cmnd->cmnd[0]); 533 fcpi_parm, cmnd->cmnd[0]);
574 host_status = DID_ERROR; 534 host_status = DID_ERROR;
575 cmnd->resid = cmnd->request_bufflen; 535 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
576 } 536 }
577 537
578 out: 538 out:
@@ -652,7 +612,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
652 "x%x SNS x%x x%x Data: x%x x%x\n", 612 "x%x SNS x%x x%x Data: x%x x%x\n",
653 phba->brd_no, vpi, cmd->device->id, 613 phba->brd_no, vpi, cmd->device->id,
654 cmd->device->lun, cmd, cmd->result, 614 cmd->device->lun, cmd, cmd->result,
655 *lp, *(lp + 3), cmd->retries, cmd->resid); 615 *lp, *(lp + 3), cmd->retries,
616 scsi_get_resid(cmd));
656 } 617 }
657 618
658 result = cmd->result; 619 result = cmd->result;
@@ -767,22 +728,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
767 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 728 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
768 * data bde entry. 729 * data bde entry.
769 */ 730 */
770 if (scsi_cmnd->use_sg) { 731 if (scsi_sg_count(scsi_cmnd)) {
771 if (datadir == DMA_TO_DEVICE) {
772 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
773 iocb_cmd->un.fcpi.fcpi_parm = 0;
774 iocb_cmd->ulpPU = 0;
775 fcp_cmnd->fcpCntl3 = WRITE_DATA;
776 phba->fc4OutputRequests++;
777 } else {
778 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
779 iocb_cmd->ulpPU = PARM_READ_CHECK;
780 iocb_cmd->un.fcpi.fcpi_parm =
781 scsi_cmnd->request_bufflen;
782 fcp_cmnd->fcpCntl3 = READ_DATA;
783 phba->fc4InputRequests++;
784 }
785 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
786 if (datadir == DMA_TO_DEVICE) { 732 if (datadir == DMA_TO_DEVICE) {
787 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 733 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
788 iocb_cmd->un.fcpi.fcpi_parm = 0; 734 iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -792,8 +738,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
792 } else { 738 } else {
793 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 739 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
794 iocb_cmd->ulpPU = PARM_READ_CHECK; 740 iocb_cmd->ulpPU = PARM_READ_CHECK;
795 iocb_cmd->un.fcpi.fcpi_parm = 741 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
796 scsi_cmnd->request_bufflen;
797 fcp_cmnd->fcpCntl3 = READ_DATA; 742 fcp_cmnd->fcpCntl3 = READ_DATA;
798 phba->fc4InputRequests++; 743 phba->fc4InputRequests++;
799 } 744 }