author    Ralph Campbell <ralph.campbell@qlogic.com>  2006-12-12 17:30:55 -0500
committer Roland Dreier <rolandd@cisco.com>           2006-12-12 17:30:55 -0500
commit    85507bcce0cd6ec859943da4e07227c124a18f3f (patch)
tree      21038b6a706d3ad9e0796e718fab3241003351df /drivers/infiniband/ulp
parent    37ccf9df974f55e99bf21278133b065cbbcf3f79 (diff)
IB/srp: Use new verbs IB DMA mapping functions
Convert SRP to use the new verbs DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/ulp')
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  81
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h   2
 2 files changed, 49 insertions, 34 deletions
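Every call site below follows the same recipe: replace a raw dma_map_single()
or dma_unmap_single() on ib_device->dma_device with the ib_dma_* wrapper that
takes the struct ib_device itself, and test the result with
ib_dma_mapping_error(). A minimal sketch of that pattern for a kernel verbs
consumer; the helpers my_map_buf()/my_unmap_buf() are illustrative only and
not part of this patch:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Illustrative helper: map a kernel buffer through the new verbs DMA
 * wrappers.  Note the address is an opaque u64, not a dma_addr_t, so
 * a device that overrides the DMA ops can return any cookie it likes.
 */
static int my_map_buf(struct ib_device *ibdev, void *buf, size_t size,
                      enum dma_data_direction dir, u64 *dma)
{
        *dma = ib_dma_map_single(ibdev, buf, size, dir);
        if (ib_dma_mapping_error(ibdev, *dma))
                return -ENOMEM;
        return 0;
}

/* Illustrative helper: the unmap side takes the same ib_device. */
static void my_unmap_buf(struct ib_device *ibdev, u64 dma, size_t size,
                         enum dma_data_direction dir)
{
        ib_dma_unmap_single(ibdev, dma, size, dir);
}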
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a6289595557b..e9b6a6f07dd7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 	if (!iu->buf)
 		goto out_free_iu;
 
-	iu->dma = dma_map_single(host->dev->dev->dma_device,
-				 iu->buf, size, direction);
-	if (dma_mapping_error(iu->dma))
+	iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
+	if (ib_dma_mapping_error(host->dev->dev, iu->dma))
 		goto out_free_buf;
 
 	iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 	if (!iu)
 		return;
 
-	dma_unmap_single(host->dev->dev->dma_device,
-			 iu->dma, iu->size, iu->direction);
+	ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
 	kfree(iu->buf);
 	kfree(iu);
 }
@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		scat = &req->fake_sg;
 	}
 
-	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
-		     scmnd->sc_data_direction);
+	ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
+			scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 	int i, j;
 	int ret;
 	struct srp_device *dev = target->srp_host->dev;
+	struct ib_device *ibdev = dev->dev;
 
 	if (!dev->fmr_pool)
 		return -ENODEV;
 
-	if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
+	if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
 	    mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
 		return -EINVAL;
 
 	len = page_cnt = 0;
 	for (i = 0; i < sg_cnt; ++i) {
-		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+		if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
 			if (i > 0)
 				return -EINVAL;
 			else
 				++page_cnt;
 		}
-		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
+		if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
 		    ~dev->fmr_page_mask) {
 			if (i < sg_cnt - 1)
 				return -EINVAL;
@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 			++page_cnt;
 		}
 
-		len += sg_dma_len(&scat[i]);
+		len += dma_len;
 	}
 
 	page_cnt += len >> dev->fmr_page_shift;
@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		return -ENOMEM;
 
 	page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i)
-		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
+	for (i = 0; i < sg_cnt; ++i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+		for (j = 0; j < dma_len; j += dev->fmr_page_size)
 			dma_pages[page_cnt++] =
-				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
+				(ib_sg_dma_address(ibdev, &scat[i]) &
+				 dev->fmr_page_mask) + j;
+	}
 
 	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
 					dma_pages, page_cnt, io_addr);
@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		goto out;
 	}
 
-	buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
+	buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
+			      ~dev->fmr_page_mask);
 	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
 	buf->len = cpu_to_be32(len);
 
@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	struct srp_cmd *cmd = req->cmd->buf;
 	int len, nents, count;
 	u8 fmt = SRP_DATA_DESC_DIRECT;
+	struct srp_device *dev;
+	struct ib_device *ibdev;
 
 	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
 	}
 
-	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
-			   scat, nents, scmnd->sc_data_direction);
+	dev = target->srp_host->dev;
+	ibdev = dev->dev;
+
+	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
 
 	fmt = SRP_DATA_DESC_DIRECT;
 	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		 */
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
-		buf->va = cpu_to_be64(sg_dma_address(scat));
-		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
-		buf->len = cpu_to_be32(sg_dma_len(scat));
+		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
 		/*
@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			count * sizeof (struct srp_direct_buf);
 
 		for (i = 0; i < count; ++i) {
+			unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
 			buf->desc_list[i].va =
-				cpu_to_be64(sg_dma_address(&scat[i]));
+				cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
 			buf->desc_list[i].key =
-				cpu_to_be32(target->srp_host->dev->mr->rkey);
-			buf->desc_list[i].len =
-				cpu_to_be32(sg_dma_len(&scat[i]));
-			datalen += sg_dma_len(&scat[i]);
+				cpu_to_be32(dev->mr->rkey);
+			buf->desc_list[i].len = cpu_to_be32(dma_len);
+			datalen += dma_len;
 		}
 
 		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
+	struct ib_device *dev;
 	struct srp_iu *iu;
 	u8 opcode;
 
 	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
 
-	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
-				target->max_ti_iu_len, DMA_FROM_DEVICE);
+	dev = target->srp_host->dev->dev;
+	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
+				   DMA_FROM_DEVICE);
 
 	opcode = *(u8 *) iu->buf;
 
@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 		break;
 	}
 
-	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
-				   target->max_ti_iu_len, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
+				      DMA_FROM_DEVICE);
 }
 
 static void srp_completion(struct ib_cq *cq, void *target_ptr)
@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
+	struct ib_device *dev;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	if (!iu)
 		goto err;
 
-	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
-				srp_max_iu_len, DMA_TO_DEVICE);
+	dev = target->srp_host->dev->dev;
+	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+				   DMA_TO_DEVICE);
 
 	req = list_entry(target->free_reqs.next, struct srp_request, list);
 
@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		goto err_unmap;
 	}
 
-	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
-				   srp_max_iu_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+				      DMA_TO_DEVICE);
 
 	if (__srp_post_send(target, iu, len)) {
 		printk(KERN_ERR PFX "Send failed\n");
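The scatter/gather conversion above is mechanical in the same way:
ib_dma_map_sg()/ib_dma_unmap_sg() replace dma_map_sg()/dma_unmap_sg(), and
per-entry addresses and lengths must then be read back through
ib_sg_dma_address()/ib_sg_dma_len() instead of sg_dma_address()/sg_dma_len(),
since the device may have overridden the mapping. A hedged sketch of that
walk; my_total_len() and its error value are illustrative only:

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: map an S/G list and walk it the way
 * srp_map_fmr()/srp_map_data() do after this patch.
 */
static int my_total_len(struct ib_device *ibdev, struct scatterlist *scat,
                        int nents, enum dma_data_direction dir, u64 *total)
{
        int i, count;

        count = ib_dma_map_sg(ibdev, scat, nents, dir);
        if (!count)
                return -EIO;

        *total = 0;
        for (i = 0; i < count; ++i) {
                u64 addr = ib_sg_dma_address(ibdev, &scat[i]);
                unsigned int len = ib_sg_dma_len(ibdev, &scat[i]);

                (void) addr;    /* a real consumer builds descriptors here */
                *total += len;
        }

        ib_dma_unmap_sg(ibdev, scat, nents, dir);
        return 0;
}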
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index d4e35ef51374..868a540ef7cd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,7 +161,7 @@ struct srp_target_port {
 };
 
 struct srp_iu {
-	dma_addr_t		dma;
+	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
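The one-line header change is the subtle part of the patch: struct srp_iu's
dma field becomes an opaque u64 because ib_dma_map_single() is declared to
return u64, and a device that supplies its own DMA mapping ops may hand back
a cookie that is not a bus address at all. Ownership handoffs between CPU and
device then go through the matching sync wrappers, as in srp_queuecommand()
above. A sketch of that send-side sequence, assuming a hypothetical
fill_request() that builds the message:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Illustrative: give the buffer to the CPU, fill it, then hand it
 * back to the device before posting the send work request.
 */
static void my_prepare_send(struct ib_device *ibdev, u64 dma, void *buf,
                            size_t len)
{
        ib_dma_sync_single_for_cpu(ibdev, dma, len, DMA_TO_DEVICE);

        memset(buf, 0, len);    /* fill_request(buf, len) in real code */

        ib_dma_sync_single_for_device(ibdev, dma, len, DMA_TO_DEVICE);
        /* ...ib_post_send() goes here... */
}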