author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2007-05-25 13:28:25 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>    2007-06-07 10:02:50 -0400
commit     bb350d1decd9c48ffaa7f7e263df3056df9f4f21
tree       0e95d2a90a3c8f91d73e78745024404bad0c310c
parent     f0002c4e1fe22d74a43d4ba3379257ee612e2724
[SCSI] ib_srp: convert to use the data buffer accessors

- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the parameters
  (a sketch of the pattern follows the sign-offs).

Jens Axboe <jens.axboe@oracle.com> did the for_each_sg cleanup.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Roland Dreier <rdreier@cisco.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
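
For readers unfamiliar with the conversion: the patch replaces direct use of
scmnd->use_sg, scmnd->request_buffer, scmnd->request_bufflen, and scmnd->resid
with the scsi_sg_count(), scsi_sglist(), scsi_for_each_sg(), and
scsi_set_resid() accessors. A minimal sketch of the pattern, assuming a
hypothetical srp_example_map() helper and a generic dma_map_sg() mapping
(neither is part of this patch):

#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical example, not from this patch: map a command's data
 * buffer using the new accessors instead of scmnd->request_buffer. */
static int srp_example_map(struct device *dmadev, struct scsi_cmnd *scmnd)
{
	struct scatterlist *sg;
	int i, nents;

	/* The midlayer now always supplies an sg list, so the old
	 * use_sg == 0 single-buffer case (and the fake_sg fallback
	 * removed by this patch) no longer needs handling. */
	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return 0;

	nents = dma_map_sg(dmadev, scsi_sglist(scmnd),
			   scsi_sg_count(scmnd), scmnd->sc_data_direction);
	if (!nents)
		return -EIO;

	/* Walk the mapped list with the accessor macro rather than
	 * indexing &scat[i]. */
	scsi_for_each_sg(scmnd, sg, nents, i)
		pr_debug("seg %d: dma %#llx, len %u\n", i,
			 (unsigned long long) sg_dma_address(sg),
			 sg_dma_len(sg));

	/* Residual counts likewise go through an accessor. */
	scsi_set_resid(scmnd, 0);

	return nents;
}

The driver's real unmap path uses ib_dma_unmap_sg() against the IB device, as
in the first hunks below.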
 drivers/infiniband/ulp/srp/ib_srp.c | 63 ++++++++++++++++++++-------------------------------------------
 drivers/infiniband/ulp/srp/ib_srp.h |  5 -----
 2 files changed, 20 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 39bf057fbc43..f01ca182f226 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -455,10 +455,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			   struct srp_target_port *target,
 			   struct srp_request *req)
 {
-	struct scatterlist *scat;
-	int nents;
-
-	if (!scmnd->request_buffer ||
+	if (!scsi_sglist(scmnd) ||
 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 		return;
@@ -468,20 +465,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		req->fmr = NULL;
 	}
 
-	/*
-	 * This handling of non-SG commands can be killed when the
-	 * SCSI midlayer no longer generates non-SG commands.
-	 */
-	if (likely(scmnd->use_sg)) {
-		nents = scmnd->use_sg;
-		scat = scmnd->request_buffer;
-	} else {
-		nents = 1;
-		scat = &req->fake_sg;
-	}
-
-	ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
-			scmnd->sc_data_direction);
+	ib_dma_unmap_sg(target->srp_host->dev->dev, scsi_sglist(scmnd),
+			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,6 +580,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 	int ret;
 	struct srp_device *dev = target->srp_host->dev;
 	struct ib_device *ibdev = dev->dev;
+	struct scatterlist *sg;
 
 	if (!dev->fmr_pool)
 		return -ENODEV;
@@ -604,16 +590,16 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		return -EINVAL;
 
 	len = page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
-		if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
+		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
 			if (i > 0)
 				return -EINVAL;
 			else
 				++page_cnt;
 		}
-		if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
+		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
 		    ~dev->fmr_page_mask) {
 			if (i < sg_cnt - 1)
 				return -EINVAL;
@@ -633,12 +619,12 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		return -ENOMEM;
 
 	page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		for (j = 0; j < dma_len; j += dev->fmr_page_size)
 			dma_pages[page_cnt++] =
-				(ib_sg_dma_address(ibdev, &scat[i]) &
+				(ib_sg_dma_address(ibdev, sg) &
 				 dev->fmr_page_mask) + j;
 	}
 
@@ -673,7 +659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	struct srp_device *dev;
 	struct ib_device *ibdev;
 
-	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
+	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
 
 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
@@ -683,18 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		return -EINVAL;
 	}
 
-	/*
-	 * This handling of non-SG commands can be killed when the
-	 * SCSI midlayer no longer generates non-SG commands.
-	 */
-	if (likely(scmnd->use_sg)) {
-		nents = scmnd->use_sg;
-		scat = scmnd->request_buffer;
-	} else {
-		nents = 1;
-		scat = &req->fake_sg;
-		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
-	}
+	nents = scsi_sg_count(scmnd);
+	scat = scsi_sglist(scmnd);
 
 	dev = target->srp_host->dev;
 	ibdev = dev->dev;
@@ -724,6 +700,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		 * descriptor.
 		 */
 		struct srp_indirect_buf *buf = (void *) cmd->add_data;
+		struct scatterlist *sg;
 		u32 datalen = 0;
 		int i;
 
@@ -732,11 +709,11 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			sizeof (struct srp_indirect_buf) +
 			count * sizeof (struct srp_direct_buf);
 
-		for (i = 0; i < count; ++i) {
-			unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+		scsi_for_each_sg(scmnd, sg, count, i) {
+			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 			buf->desc_list[i].va =
-				cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
+				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
 				cpu_to_be32(dev->mr->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
@@ -802,9 +779,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	}
 
 	if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
-		scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
+		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
 	else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
-		scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
+		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
 	if (!req->tsk_mgmt) {
 		scmnd->host_scribble = (void *) -1L;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 1d53c7bc368f..e3573e7038c4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -106,11 +106,6 @@ struct srp_request {
 	struct srp_iu	       *cmd;
 	struct srp_iu	       *tsk_mgmt;
 	struct ib_pool_fmr     *fmr;
-	/*
-	 * Fake scatterlist used when scmnd->use_sg==0. Can be killed
-	 * when the SCSI midlayer no longer generates non-SG commands.
-	 */
-	struct scatterlist	fake_sg;
 	struct completion	done;
 	short			index;
 	u8			cmd_done;
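
A note on the for_each_sg cleanup credited in the message: iterating with an
sg_next()-style macro instead of indexing &scat[i] matters once scatterlists
may be chained rather than laid out as one flat array, where plain pointer
arithmetic can walk past the end of a chunk. A hedged sketch of the iterator
pattern (sum_sg_len() is a made-up helper, and sg chaining itself landed in
kernels slightly after this commit):

#include <linux/scatterlist.h>

/* Illustrative helper, not from this patch: total the byte length of
 * an sg list. for_each_sg() advances via sg_next(), which follows
 * chain entries, so this stays correct for chained scatterlists where
 * scat[i] indexing would not. */
static unsigned int sum_sg_len(struct scatterlist *scat, int nents)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	for_each_sg(scat, sg, nents, i)
		total += sg->length;

	return total;
}

scsi_for_each_sg(), used throughout the converted hunks above, wraps the same
iteration over the command's own sg list.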