author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-24 10:59:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-24 10:59:46 -0400
commit		0625bef6060fab4aab0e484130b59af5e9ac81bc (patch)
tree		2774beafe6bcab689ab5da5413726d8f8f606241 /drivers/infiniband
parent		a6a1d6485e77f28c11cdf943a3ed2a3fd83ac727 (diff)
parent		ba82638247c10cfda32268c894f78f8124f54084 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB: Increase DMA max_segment_size on Mellanox hardware
IB/mad: Improve an error message so error code is included
RDMA/nes: Don't print success message at level KERN_ERR
RDMA/addr: Fix return of uninitialized ret value
IB/srp: try to use larger FMR sizes to cover our mappings
IB/srp: add support for indirect tables that don't fit in SRP_CMD
IB/srp: rework mapping engine to use multiple FMR entries
IB/srp: allow sg_tablesize to be set for each target
IB/srp: move IB CM setup completion into its own function
IB/srp: always avoid non-zero offsets into an FMR
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/addr.c	2
-rw-r--r--	drivers/infiniband/core/agent.c	3
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_main.c	3
-rw-r--r--	drivers/infiniband/hw/nes/nes.c	2
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c	725
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h	38
6 files changed, 521 insertions, 252 deletions
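
Most of the change below adds per-target scatter/gather configuration to the SRP initiator: cmd_sg_entries, allow_ext_sg and sg_tablesize become target-creation options in srp_parse_options(), with matching module parameters. As a purely illustrative sketch (not part of this commit), a target could be logged in with those options through the initiator's add_target sysfs attribute roughly as below; the sysfs path, GUIDs and option values are placeholder assumptions, only the option names come from the patch.

```c
/*
 * Hypothetical usage sketch: create an SRP target with the new
 * per-target S/G options.  Path and identifiers are placeholders.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/infiniband_srp/srp-mlx4_0-1/add_target", "w");

	if (!f) {
		perror("open add_target");
		return 1;
	}
	/* id_ext, ioc_guid, dgid and service_id below are dummy values. */
	fprintf(f, "id_ext=0002c90300004e8c,ioc_guid=0002c90300004e8c,"
		   "dgid=fe800000000000000002c90300004e8d,pkey=ffff,"
		   "service_id=0002c90300004e8c,"
		   "cmd_sg_entries=64,allow_ext_sg=1,sg_tablesize=256");
	fclose(f);
	return 0;
}
```

In practice the same option string is usually written with echo from a shell; the C form here is only to keep the sketch in the language of the surrounding code.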
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e0ef5fdc361e..4ffc224faa7f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -204,7 +204,7 @@ static int addr4_resolve(struct sockaddr_in *src_in, | |||
204 | 204 | ||
205 | /* If the device does ARP internally, return 'done' */ | 205 | /* If the device does ARP internally, return 'done' */ |
206 | if (rt->dst.dev->flags & IFF_NOARP) { | 206 | if (rt->dst.dev->flags & IFF_NOARP) { |
207 | rdma_copy_addr(addr, rt->dst.dev, NULL); | 207 | ret = rdma_copy_addr(addr, rt->dst.dev, NULL); |
208 | goto put; | 208 | goto put; |
209 | } | 209 | } |
210 | 210 | ||
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 91916a8d5de4..2bc7f5af64f4 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -101,7 +101,8 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, | |||
101 | agent = port_priv->agent[qpn]; | 101 | agent = port_priv->agent[qpn]; |
102 | ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); | 102 | ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); |
103 | if (IS_ERR(ah)) { | 103 | if (IS_ERR(ah)) { |
104 | printk(KERN_ERR SPFX "ib_create_ah_from_wc error\n"); | 104 | printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n", |
105 | PTR_ERR(ah)); | ||
105 | return; | 106 | return; |
106 | } | 107 | } |
107 | 108 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 8a40cd539ab1..f24b79b805f2 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1043,6 +1043,9 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) | |||
1043 | } | 1043 | } |
1044 | } | 1044 | } |
1045 | 1045 | ||
1046 | /* We can handle large RDMA requests, so allow larger segments. */ | ||
1047 | dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); | ||
1048 | |||
1046 | mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev); | 1049 | mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev); |
1047 | if (!mdev) { | 1050 | if (!mdev) { |
1048 | dev_err(&pdev->dev, "Device struct alloc failed, " | 1051 | dev_err(&pdev->dev, "Device struct alloc failed, " |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3d7f3664b67b..13de1192927c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -694,7 +694,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
694 | nesdev->netdev_count++; | 694 | nesdev->netdev_count++; |
695 | nesdev->nesadapter->netdev_count++; | 695 | nesdev->nesadapter->netdev_count++; |
696 | 696 | ||
697 | printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n", | 697 | printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n", |
698 | pci_name(pcidev)); | 698 | pci_name(pcidev)); |
699 | return 0; | 699 | return 0; |
700 | 700 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 83664ed2804f..376d640487d2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -59,25 +59,31 @@ MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " | |||
59 | "v" DRV_VERSION " (" DRV_RELDATE ")"); | 59 | "v" DRV_VERSION " (" DRV_RELDATE ")"); |
60 | MODULE_LICENSE("Dual BSD/GPL"); | 60 | MODULE_LICENSE("Dual BSD/GPL"); |
61 | 61 | ||
62 | static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE; | 62 | static unsigned int srp_sg_tablesize; |
63 | static int srp_max_iu_len; | 63 | static unsigned int cmd_sg_entries; |
64 | static unsigned int indirect_sg_entries; | ||
65 | static bool allow_ext_sg; | ||
66 | static int topspin_workarounds = 1; | ||
64 | 67 | ||
65 | module_param(srp_sg_tablesize, int, 0444); | 68 | module_param(srp_sg_tablesize, uint, 0444); |
66 | MODULE_PARM_DESC(srp_sg_tablesize, | 69 | MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries"); |
67 | "Max number of gather/scatter entries per I/O (default is 12, max 255)"); | ||
68 | 70 | ||
69 | static int topspin_workarounds = 1; | 71 | module_param(cmd_sg_entries, uint, 0444); |
72 | MODULE_PARM_DESC(cmd_sg_entries, | ||
73 | "Default number of gather/scatter entries in the SRP command (default is 12, max 255)"); | ||
74 | |||
75 | module_param(indirect_sg_entries, uint, 0444); | ||
76 | MODULE_PARM_DESC(indirect_sg_entries, | ||
77 | "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")"); | ||
78 | |||
79 | module_param(allow_ext_sg, bool, 0444); | ||
80 | MODULE_PARM_DESC(allow_ext_sg, | ||
81 | "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)"); | ||
70 | 82 | ||
71 | module_param(topspin_workarounds, int, 0444); | 83 | module_param(topspin_workarounds, int, 0444); |
72 | MODULE_PARM_DESC(topspin_workarounds, | 84 | MODULE_PARM_DESC(topspin_workarounds, |
73 | "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); | 85 | "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); |
74 | 86 | ||
75 | static int mellanox_workarounds = 1; | ||
76 | |||
77 | module_param(mellanox_workarounds, int, 0444); | ||
78 | MODULE_PARM_DESC(mellanox_workarounds, | ||
79 | "Enable workarounds for Mellanox SRP target bugs if != 0"); | ||
80 | |||
81 | static void srp_add_one(struct ib_device *device); | 87 | static void srp_add_one(struct ib_device *device); |
82 | static void srp_remove_one(struct ib_device *device); | 88 | static void srp_remove_one(struct ib_device *device); |
83 | static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); | 89 | static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); |
@@ -114,14 +120,6 @@ static int srp_target_is_topspin(struct srp_target_port *target) | |||
114 | !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); | 120 | !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); |
115 | } | 121 | } |
116 | 122 | ||
117 | static int srp_target_is_mellanox(struct srp_target_port *target) | ||
118 | { | ||
119 | static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; | ||
120 | |||
121 | return mellanox_workarounds && | ||
122 | !memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui); | ||
123 | } | ||
124 | |||
125 | static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, | 123 | static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, |
126 | gfp_t gfp_mask, | 124 | gfp_t gfp_mask, |
127 | enum dma_data_direction direction) | 125 | enum dma_data_direction direction) |
@@ -378,7 +376,7 @@ static int srp_send_req(struct srp_target_port *target) | |||
378 | 376 | ||
379 | req->priv.opcode = SRP_LOGIN_REQ; | 377 | req->priv.opcode = SRP_LOGIN_REQ; |
380 | req->priv.tag = 0; | 378 | req->priv.tag = 0; |
381 | req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len); | 379 | req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len); |
382 | req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | | 380 | req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | |
383 | SRP_BUF_FORMAT_INDIRECT); | 381 | SRP_BUF_FORMAT_INDIRECT); |
384 | /* | 382 | /* |
@@ -456,6 +454,24 @@ static bool srp_change_state(struct srp_target_port *target, | |||
456 | return changed; | 454 | return changed; |
457 | } | 455 | } |
458 | 456 | ||
457 | static void srp_free_req_data(struct srp_target_port *target) | ||
458 | { | ||
459 | struct ib_device *ibdev = target->srp_host->srp_dev->dev; | ||
460 | struct srp_request *req; | ||
461 | int i; | ||
462 | |||
463 | for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) { | ||
464 | kfree(req->fmr_list); | ||
465 | kfree(req->map_page); | ||
466 | if (req->indirect_dma_addr) { | ||
467 | ib_dma_unmap_single(ibdev, req->indirect_dma_addr, | ||
468 | target->indirect_size, | ||
469 | DMA_TO_DEVICE); | ||
470 | } | ||
471 | kfree(req->indirect_desc); | ||
472 | } | ||
473 | } | ||
474 | |||
459 | static void srp_remove_work(struct work_struct *work) | 475 | static void srp_remove_work(struct work_struct *work) |
460 | { | 476 | { |
461 | struct srp_target_port *target = | 477 | struct srp_target_port *target = |
@@ -472,6 +488,7 @@ static void srp_remove_work(struct work_struct *work) | |||
472 | scsi_remove_host(target->scsi_host); | 488 | scsi_remove_host(target->scsi_host); |
473 | ib_destroy_cm_id(target->cm_id); | 489 | ib_destroy_cm_id(target->cm_id); |
474 | srp_free_target_ib(target); | 490 | srp_free_target_ib(target); |
491 | srp_free_req_data(target); | ||
475 | scsi_host_put(target->scsi_host); | 492 | scsi_host_put(target->scsi_host); |
476 | } | 493 | } |
477 | 494 | ||
@@ -535,18 +552,20 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, | |||
535 | struct srp_target_port *target, | 552 | struct srp_target_port *target, |
536 | struct srp_request *req) | 553 | struct srp_request *req) |
537 | { | 554 | { |
555 | struct ib_device *ibdev = target->srp_host->srp_dev->dev; | ||
556 | struct ib_pool_fmr **pfmr; | ||
557 | |||
538 | if (!scsi_sglist(scmnd) || | 558 | if (!scsi_sglist(scmnd) || |
539 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | 559 | (scmnd->sc_data_direction != DMA_TO_DEVICE && |
540 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | 560 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) |
541 | return; | 561 | return; |
542 | 562 | ||
543 | if (req->fmr) { | 563 | pfmr = req->fmr_list; |
544 | ib_fmr_pool_unmap(req->fmr); | 564 | while (req->nfmr--) |
545 | req->fmr = NULL; | 565 | ib_fmr_pool_unmap(*pfmr++); |
546 | } | ||
547 | 566 | ||
548 | ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd), | 567 | ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), |
549 | scsi_sg_count(scmnd), scmnd->sc_data_direction); | 568 | scmnd->sc_data_direction); |
550 | } | 569 | } |
551 | 570 | ||
552 | static void srp_remove_req(struct srp_target_port *target, | 571 | static void srp_remove_req(struct srp_target_port *target, |
@@ -645,96 +664,151 @@ err: | |||
645 | return ret; | 664 | return ret; |
646 | } | 665 | } |
647 | 666 | ||
648 | static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, | 667 | static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, |
649 | int sg_cnt, struct srp_request *req, | 668 | unsigned int dma_len, u32 rkey) |
650 | struct srp_direct_buf *buf) | ||
651 | { | 669 | { |
652 | u64 io_addr = 0; | 670 | struct srp_direct_buf *desc = state->desc; |
653 | u64 *dma_pages; | ||
654 | u32 len; | ||
655 | int page_cnt; | ||
656 | int i, j; | ||
657 | int ret; | ||
658 | struct srp_device *dev = target->srp_host->srp_dev; | ||
659 | struct ib_device *ibdev = dev->dev; | ||
660 | struct scatterlist *sg; | ||
661 | 671 | ||
662 | if (!dev->fmr_pool) | 672 | desc->va = cpu_to_be64(dma_addr); |
663 | return -ENODEV; | 673 | desc->key = cpu_to_be32(rkey); |
674 | desc->len = cpu_to_be32(dma_len); | ||
664 | 675 | ||
665 | if (srp_target_is_mellanox(target) && | 676 | state->total_len += dma_len; |
666 | (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask)) | 677 | state->desc++; |
667 | return -EINVAL; | 678 | state->ndesc++; |
679 | } | ||
668 | 680 | ||
669 | len = page_cnt = 0; | 681 | static int srp_map_finish_fmr(struct srp_map_state *state, |
670 | scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) { | 682 | struct srp_target_port *target) |
671 | unsigned int dma_len = ib_sg_dma_len(ibdev, sg); | 683 | { |
684 | struct srp_device *dev = target->srp_host->srp_dev; | ||
685 | struct ib_pool_fmr *fmr; | ||
686 | u64 io_addr = 0; | ||
672 | 687 | ||
673 | if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) { | 688 | if (!state->npages) |
674 | if (i > 0) | 689 | return 0; |
675 | return -EINVAL; | ||
676 | else | ||
677 | ++page_cnt; | ||
678 | } | ||
679 | if ((ib_sg_dma_address(ibdev, sg) + dma_len) & | ||
680 | ~dev->fmr_page_mask) { | ||
681 | if (i < sg_cnt - 1) | ||
682 | return -EINVAL; | ||
683 | else | ||
684 | ++page_cnt; | ||
685 | } | ||
686 | 690 | ||
687 | len += dma_len; | 691 | if (state->npages == 1) { |
692 | srp_map_desc(state, state->base_dma_addr, state->fmr_len, | ||
693 | target->rkey); | ||
694 | state->npages = state->fmr_len = 0; | ||
695 | return 0; | ||
688 | } | 696 | } |
689 | 697 | ||
690 | page_cnt += len >> dev->fmr_page_shift; | 698 | fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages, |
691 | if (page_cnt > SRP_FMR_SIZE) | 699 | state->npages, io_addr); |
692 | return -ENOMEM; | 700 | if (IS_ERR(fmr)) |
701 | return PTR_ERR(fmr); | ||
693 | 702 | ||
694 | dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC); | 703 | *state->next_fmr++ = fmr; |
695 | if (!dma_pages) | 704 | state->nfmr++; |
696 | return -ENOMEM; | ||
697 | 705 | ||
698 | page_cnt = 0; | 706 | srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey); |
699 | scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) { | 707 | state->npages = state->fmr_len = 0; |
700 | unsigned int dma_len = ib_sg_dma_len(ibdev, sg); | 708 | return 0; |
709 | } | ||
701 | 710 | ||
702 | for (j = 0; j < dma_len; j += dev->fmr_page_size) | 711 | static void srp_map_update_start(struct srp_map_state *state, |
703 | dma_pages[page_cnt++] = | 712 | struct scatterlist *sg, int sg_index, |
704 | (ib_sg_dma_address(ibdev, sg) & | 713 | dma_addr_t dma_addr) |
705 | dev->fmr_page_mask) + j; | 714 | { |
715 | state->unmapped_sg = sg; | ||
716 | state->unmapped_index = sg_index; | ||
717 | state->unmapped_addr = dma_addr; | ||
718 | } | ||
719 | |||
720 | static int srp_map_sg_entry(struct srp_map_state *state, | ||
721 | struct srp_target_port *target, | ||
722 | struct scatterlist *sg, int sg_index, | ||
723 | int use_fmr) | ||
724 | { | ||
725 | struct srp_device *dev = target->srp_host->srp_dev; | ||
726 | struct ib_device *ibdev = dev->dev; | ||
727 | dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); | ||
728 | unsigned int dma_len = ib_sg_dma_len(ibdev, sg); | ||
729 | unsigned int len; | ||
730 | int ret; | ||
731 | |||
732 | if (!dma_len) | ||
733 | return 0; | ||
734 | |||
735 | if (use_fmr == SRP_MAP_NO_FMR) { | ||
736 | /* Once we're in direct map mode for a request, we don't | ||
737 | * go back to FMR mode, so no need to update anything | ||
738 | * other than the descriptor. | ||
739 | */ | ||
740 | srp_map_desc(state, dma_addr, dma_len, target->rkey); | ||
741 | return 0; | ||
706 | } | 742 | } |
707 | 743 | ||
708 | req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, | 744 | /* If we start at an offset into the FMR page, don't merge into |
709 | dma_pages, page_cnt, io_addr); | 745 | * the current FMR. Finish it out, and use the kernel's MR for this |
710 | if (IS_ERR(req->fmr)) { | 746 | * sg entry. This is to avoid potential bugs on some SRP targets |
711 | ret = PTR_ERR(req->fmr); | 747 | * that were never quite defined, but went away when the initiator |
712 | req->fmr = NULL; | 748 | * avoided using FMR on such page fragments. |
713 | goto out; | 749 | */ |
750 | if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) { | ||
751 | ret = srp_map_finish_fmr(state, target); | ||
752 | if (ret) | ||
753 | return ret; | ||
754 | |||
755 | srp_map_desc(state, dma_addr, dma_len, target->rkey); | ||
756 | srp_map_update_start(state, NULL, 0, 0); | ||
757 | return 0; | ||
714 | } | 758 | } |
715 | 759 | ||
716 | buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) & | 760 | /* If this is the first sg to go into the FMR, save our position. |
717 | ~dev->fmr_page_mask); | 761 | * We need to know the first unmapped entry, its index, and the |
718 | buf->key = cpu_to_be32(req->fmr->fmr->rkey); | 762 | * first unmapped address within that entry to be able to restart |
719 | buf->len = cpu_to_be32(len); | 763 | * mapping after an error. |
764 | */ | ||
765 | if (!state->unmapped_sg) | ||
766 | srp_map_update_start(state, sg, sg_index, dma_addr); | ||
720 | 767 | ||
721 | ret = 0; | 768 | while (dma_len) { |
769 | if (state->npages == SRP_FMR_SIZE) { | ||
770 | ret = srp_map_finish_fmr(state, target); | ||
771 | if (ret) | ||
772 | return ret; | ||
722 | 773 | ||
723 | out: | 774 | srp_map_update_start(state, sg, sg_index, dma_addr); |
724 | kfree(dma_pages); | 775 | } |
776 | |||
777 | len = min_t(unsigned int, dma_len, dev->fmr_page_size); | ||
725 | 778 | ||
779 | if (!state->npages) | ||
780 | state->base_dma_addr = dma_addr; | ||
781 | state->pages[state->npages++] = dma_addr; | ||
782 | state->fmr_len += len; | ||
783 | dma_addr += len; | ||
784 | dma_len -= len; | ||
785 | } | ||
786 | |||
787 | /* If the last entry of the FMR wasn't a full page, then we need to | ||
788 | * close it out and start a new one -- we can only merge at page | ||
789 | * boundries. | ||
790 | */ | ||
791 | ret = 0; | ||
792 | if (len != dev->fmr_page_size) { | ||
793 | ret = srp_map_finish_fmr(state, target); | ||
794 | if (!ret) | ||
795 | srp_map_update_start(state, NULL, 0, 0); | ||
796 | } | ||
726 | return ret; | 797 | return ret; |
727 | } | 798 | } |
728 | 799 | ||
729 | static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | 800 | static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, |
730 | struct srp_request *req) | 801 | struct srp_request *req) |
731 | { | 802 | { |
732 | struct scatterlist *scat; | 803 | struct scatterlist *scat, *sg; |
733 | struct srp_cmd *cmd = req->cmd->buf; | 804 | struct srp_cmd *cmd = req->cmd->buf; |
734 | int len, nents, count; | 805 | int i, len, nents, count, use_fmr; |
735 | u8 fmt = SRP_DATA_DESC_DIRECT; | ||
736 | struct srp_device *dev; | 806 | struct srp_device *dev; |
737 | struct ib_device *ibdev; | 807 | struct ib_device *ibdev; |
808 | struct srp_map_state state; | ||
809 | struct srp_indirect_buf *indirect_hdr; | ||
810 | u32 table_len; | ||
811 | u8 fmt; | ||
738 | 812 | ||
739 | if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) | 813 | if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) |
740 | return sizeof (struct srp_cmd); | 814 | return sizeof (struct srp_cmd); |
@@ -754,6 +828,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
754 | ibdev = dev->dev; | 828 | ibdev = dev->dev; |
755 | 829 | ||
756 | count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); | 830 | count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); |
831 | if (unlikely(count == 0)) | ||
832 | return -EIO; | ||
757 | 833 | ||
758 | fmt = SRP_DATA_DESC_DIRECT; | 834 | fmt = SRP_DATA_DESC_DIRECT; |
759 | len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); | 835 | len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); |
@@ -770,49 +846,99 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
770 | buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); | 846 | buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); |
771 | buf->key = cpu_to_be32(target->rkey); | 847 | buf->key = cpu_to_be32(target->rkey); |
772 | buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); | 848 | buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); |
773 | } else if (srp_map_fmr(target, scat, count, req, | 849 | |
774 | (void *) cmd->add_data)) { | 850 | req->nfmr = 0; |
775 | /* | 851 | goto map_complete; |
776 | * FMR mapping failed, and the scatterlist has more | 852 | } |
777 | * than one entry. Generate an indirect memory | 853 | |
778 | * descriptor. | 854 | /* We have more than one scatter/gather entry, so build our indirect |
779 | */ | 855 | * descriptor table, trying to merge as many entries with FMR as we |
780 | struct srp_indirect_buf *buf = (void *) cmd->add_data; | 856 | * can. |
781 | struct scatterlist *sg; | 857 | */ |
782 | u32 datalen = 0; | 858 | indirect_hdr = (void *) cmd->add_data; |
783 | int i; | 859 | |
784 | 860 | ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, | |
785 | fmt = SRP_DATA_DESC_INDIRECT; | 861 | target->indirect_size, DMA_TO_DEVICE); |
786 | len = sizeof (struct srp_cmd) + | 862 | |
787 | sizeof (struct srp_indirect_buf) + | 863 | memset(&state, 0, sizeof(state)); |
788 | count * sizeof (struct srp_direct_buf); | 864 | state.desc = req->indirect_desc; |
789 | 865 | state.pages = req->map_page; | |
790 | scsi_for_each_sg(scmnd, sg, count, i) { | 866 | state.next_fmr = req->fmr_list; |
791 | unsigned int dma_len = ib_sg_dma_len(ibdev, sg); | 867 | |
792 | 868 | use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR; | |
793 | buf->desc_list[i].va = | 869 | |
794 | cpu_to_be64(ib_sg_dma_address(ibdev, sg)); | 870 | for_each_sg(scat, sg, count, i) { |
795 | buf->desc_list[i].key = | 871 | if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) { |
796 | cpu_to_be32(target->rkey); | 872 | /* FMR mapping failed, so backtrack to the first |
797 | buf->desc_list[i].len = cpu_to_be32(dma_len); | 873 | * unmapped entry and continue on without using FMR. |
798 | datalen += dma_len; | 874 | */ |
875 | dma_addr_t dma_addr; | ||
876 | unsigned int dma_len; | ||
877 | |||
878 | backtrack: | ||
879 | sg = state.unmapped_sg; | ||
880 | i = state.unmapped_index; | ||
881 | |||
882 | dma_addr = ib_sg_dma_address(ibdev, sg); | ||
883 | dma_len = ib_sg_dma_len(ibdev, sg); | ||
884 | dma_len -= (state.unmapped_addr - dma_addr); | ||
885 | dma_addr = state.unmapped_addr; | ||
886 | use_fmr = SRP_MAP_NO_FMR; | ||
887 | srp_map_desc(&state, dma_addr, dma_len, target->rkey); | ||
799 | } | 888 | } |
889 | } | ||
800 | 890 | ||
801 | if (scmnd->sc_data_direction == DMA_TO_DEVICE) | 891 | if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target)) |
802 | cmd->data_out_desc_cnt = count; | 892 | goto backtrack; |
803 | else | ||
804 | cmd->data_in_desc_cnt = count; | ||
805 | 893 | ||
806 | buf->table_desc.va = | 894 | /* We've mapped the request, now pull as much of the indirect |
807 | cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf); | 895 | * descriptor table as we can into the command buffer. If this |
808 | buf->table_desc.key = | 896 | * target is not using an external indirect table, we are |
809 | cpu_to_be32(target->rkey); | 897 | * guaranteed to fit into the command, as the SCSI layer won't |
810 | buf->table_desc.len = | 898 | * give us more S/G entries than we allow. |
811 | cpu_to_be32(count * sizeof (struct srp_direct_buf)); | 899 | */ |
900 | req->nfmr = state.nfmr; | ||
901 | if (state.ndesc == 1) { | ||
902 | /* FMR mapping was able to collapse this to one entry, | ||
903 | * so use a direct descriptor. | ||
904 | */ | ||
905 | struct srp_direct_buf *buf = (void *) cmd->add_data; | ||
812 | 906 | ||
813 | buf->len = cpu_to_be32(datalen); | 907 | *buf = req->indirect_desc[0]; |
908 | goto map_complete; | ||
909 | } | ||
910 | |||
911 | if (unlikely(target->cmd_sg_cnt < state.ndesc && | ||
912 | !target->allow_ext_sg)) { | ||
913 | shost_printk(KERN_ERR, target->scsi_host, | ||
914 | "Could not fit S/G list into SRP_CMD\n"); | ||
915 | return -EIO; | ||
814 | } | 916 | } |
815 | 917 | ||
918 | count = min(state.ndesc, target->cmd_sg_cnt); | ||
919 | table_len = state.ndesc * sizeof (struct srp_direct_buf); | ||
920 | |||
921 | fmt = SRP_DATA_DESC_INDIRECT; | ||
922 | len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); | ||
923 | len += count * sizeof (struct srp_direct_buf); | ||
924 | |||
925 | memcpy(indirect_hdr->desc_list, req->indirect_desc, | ||
926 | count * sizeof (struct srp_direct_buf)); | ||
927 | |||
928 | indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); | ||
929 | indirect_hdr->table_desc.key = cpu_to_be32(target->rkey); | ||
930 | indirect_hdr->table_desc.len = cpu_to_be32(table_len); | ||
931 | indirect_hdr->len = cpu_to_be32(state.total_len); | ||
932 | |||
933 | if (scmnd->sc_data_direction == DMA_TO_DEVICE) | ||
934 | cmd->data_out_desc_cnt = count; | ||
935 | else | ||
936 | cmd->data_in_desc_cnt = count; | ||
937 | |||
938 | ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, | ||
939 | DMA_TO_DEVICE); | ||
940 | |||
941 | map_complete: | ||
816 | if (scmnd->sc_data_direction == DMA_TO_DEVICE) | 942 | if (scmnd->sc_data_direction == DMA_TO_DEVICE) |
817 | cmd->buf_fmt = fmt << 4; | 943 | cmd->buf_fmt = fmt << 4; |
818 | else | 944 | else |
@@ -1140,7 +1266,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1140 | spin_unlock_irqrestore(&target->lock, flags); | 1266 | spin_unlock_irqrestore(&target->lock, flags); |
1141 | 1267 | ||
1142 | dev = target->srp_host->srp_dev->dev; | 1268 | dev = target->srp_host->srp_dev->dev; |
1143 | ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, | 1269 | ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, |
1144 | DMA_TO_DEVICE); | 1270 | DMA_TO_DEVICE); |
1145 | 1271 | ||
1146 | scmnd->result = 0; | 1272 | scmnd->result = 0; |
@@ -1164,7 +1290,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1164 | goto err_iu; | 1290 | goto err_iu; |
1165 | } | 1291 | } |
1166 | 1292 | ||
1167 | ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, | 1293 | ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, |
1168 | DMA_TO_DEVICE); | 1294 | DMA_TO_DEVICE); |
1169 | 1295 | ||
1170 | if (srp_post_send(target, iu, len)) { | 1296 | if (srp_post_send(target, iu, len)) { |
@@ -1204,7 +1330,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) | |||
1204 | 1330 | ||
1205 | for (i = 0; i < SRP_SQ_SIZE; ++i) { | 1331 | for (i = 0; i < SRP_SQ_SIZE; ++i) { |
1206 | target->tx_ring[i] = srp_alloc_iu(target->srp_host, | 1332 | target->tx_ring[i] = srp_alloc_iu(target->srp_host, |
1207 | srp_max_iu_len, | 1333 | target->max_iu_len, |
1208 | GFP_KERNEL, DMA_TO_DEVICE); | 1334 | GFP_KERNEL, DMA_TO_DEVICE); |
1209 | if (!target->tx_ring[i]) | 1335 | if (!target->tx_ring[i]) |
1210 | goto err; | 1336 | goto err; |
@@ -1228,6 +1354,78 @@ err: | |||
1228 | return -ENOMEM; | 1354 | return -ENOMEM; |
1229 | } | 1355 | } |
1230 | 1356 | ||
1357 | static void srp_cm_rep_handler(struct ib_cm_id *cm_id, | ||
1358 | struct srp_login_rsp *lrsp, | ||
1359 | struct srp_target_port *target) | ||
1360 | { | ||
1361 | struct ib_qp_attr *qp_attr = NULL; | ||
1362 | int attr_mask = 0; | ||
1363 | int ret; | ||
1364 | int i; | ||
1365 | |||
1366 | if (lrsp->opcode == SRP_LOGIN_RSP) { | ||
1367 | target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); | ||
1368 | target->req_lim = be32_to_cpu(lrsp->req_lim_delta); | ||
1369 | |||
1370 | /* | ||
1371 | * Reserve credits for task management so we don't | ||
1372 | * bounce requests back to the SCSI mid-layer. | ||
1373 | */ | ||
1374 | target->scsi_host->can_queue | ||
1375 | = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, | ||
1376 | target->scsi_host->can_queue); | ||
1377 | } else { | ||
1378 | shost_printk(KERN_WARNING, target->scsi_host, | ||
1379 | PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); | ||
1380 | ret = -ECONNRESET; | ||
1381 | goto error; | ||
1382 | } | ||
1383 | |||
1384 | if (!target->rx_ring[0]) { | ||
1385 | ret = srp_alloc_iu_bufs(target); | ||
1386 | if (ret) | ||
1387 | goto error; | ||
1388 | } | ||
1389 | |||
1390 | ret = -ENOMEM; | ||
1391 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); | ||
1392 | if (!qp_attr) | ||
1393 | goto error; | ||
1394 | |||
1395 | qp_attr->qp_state = IB_QPS_RTR; | ||
1396 | ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); | ||
1397 | if (ret) | ||
1398 | goto error_free; | ||
1399 | |||
1400 | ret = ib_modify_qp(target->qp, qp_attr, attr_mask); | ||
1401 | if (ret) | ||
1402 | goto error_free; | ||
1403 | |||
1404 | for (i = 0; i < SRP_RQ_SIZE; i++) { | ||
1405 | struct srp_iu *iu = target->rx_ring[i]; | ||
1406 | ret = srp_post_recv(target, iu); | ||
1407 | if (ret) | ||
1408 | goto error_free; | ||
1409 | } | ||
1410 | |||
1411 | qp_attr->qp_state = IB_QPS_RTS; | ||
1412 | ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); | ||
1413 | if (ret) | ||
1414 | goto error_free; | ||
1415 | |||
1416 | ret = ib_modify_qp(target->qp, qp_attr, attr_mask); | ||
1417 | if (ret) | ||
1418 | goto error_free; | ||
1419 | |||
1420 | ret = ib_send_cm_rtu(cm_id, NULL, 0); | ||
1421 | |||
1422 | error_free: | ||
1423 | kfree(qp_attr); | ||
1424 | |||
1425 | error: | ||
1426 | target->status = ret; | ||
1427 | } | ||
1428 | |||
1231 | static void srp_cm_rej_handler(struct ib_cm_id *cm_id, | 1429 | static void srp_cm_rej_handler(struct ib_cm_id *cm_id, |
1232 | struct ib_cm_event *event, | 1430 | struct ib_cm_event *event, |
1233 | struct srp_target_port *target) | 1431 | struct srp_target_port *target) |
@@ -1311,11 +1509,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id, | |||
1311 | static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | 1509 | static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) |
1312 | { | 1510 | { |
1313 | struct srp_target_port *target = cm_id->context; | 1511 | struct srp_target_port *target = cm_id->context; |
1314 | struct ib_qp_attr *qp_attr = NULL; | ||
1315 | int attr_mask = 0; | ||
1316 | int comp = 0; | 1512 | int comp = 0; |
1317 | int opcode = 0; | ||
1318 | int i; | ||
1319 | 1513 | ||
1320 | switch (event->event) { | 1514 | switch (event->event) { |
1321 | case IB_CM_REQ_ERROR: | 1515 | case IB_CM_REQ_ERROR: |
@@ -1327,71 +1521,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1327 | 1521 | ||
1328 | case IB_CM_REP_RECEIVED: | 1522 | case IB_CM_REP_RECEIVED: |
1329 | comp = 1; | 1523 | comp = 1; |
1330 | opcode = *(u8 *) event->private_data; | 1524 | srp_cm_rep_handler(cm_id, event->private_data, target); |
1331 | |||
1332 | if (opcode == SRP_LOGIN_RSP) { | ||
1333 | struct srp_login_rsp *rsp = event->private_data; | ||
1334 | |||
1335 | target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); | ||
1336 | target->req_lim = be32_to_cpu(rsp->req_lim_delta); | ||
1337 | |||
1338 | /* | ||
1339 | * Reserve credits for task management so we don't | ||
1340 | * bounce requests back to the SCSI mid-layer. | ||
1341 | */ | ||
1342 | target->scsi_host->can_queue | ||
1343 | = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, | ||
1344 | target->scsi_host->can_queue); | ||
1345 | } else { | ||
1346 | shost_printk(KERN_WARNING, target->scsi_host, | ||
1347 | PFX "Unhandled RSP opcode %#x\n", opcode); | ||
1348 | target->status = -ECONNRESET; | ||
1349 | break; | ||
1350 | } | ||
1351 | |||
1352 | if (!target->rx_ring[0]) { | ||
1353 | target->status = srp_alloc_iu_bufs(target); | ||
1354 | if (target->status) | ||
1355 | break; | ||
1356 | } | ||
1357 | |||
1358 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); | ||
1359 | if (!qp_attr) { | ||
1360 | target->status = -ENOMEM; | ||
1361 | break; | ||
1362 | } | ||
1363 | |||
1364 | qp_attr->qp_state = IB_QPS_RTR; | ||
1365 | target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); | ||
1366 | if (target->status) | ||
1367 | break; | ||
1368 | |||
1369 | target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); | ||
1370 | if (target->status) | ||
1371 | break; | ||
1372 | |||
1373 | for (i = 0; i < SRP_RQ_SIZE; i++) { | ||
1374 | struct srp_iu *iu = target->rx_ring[i]; | ||
1375 | target->status = srp_post_recv(target, iu); | ||
1376 | if (target->status) | ||
1377 | break; | ||
1378 | } | ||
1379 | if (target->status) | ||
1380 | break; | ||
1381 | |||
1382 | qp_attr->qp_state = IB_QPS_RTS; | ||
1383 | target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); | ||
1384 | if (target->status) | ||
1385 | break; | ||
1386 | |||
1387 | target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); | ||
1388 | if (target->status) | ||
1389 | break; | ||
1390 | |||
1391 | target->status = ib_send_cm_rtu(cm_id, NULL, 0); | ||
1392 | if (target->status) | ||
1393 | break; | ||
1394 | |||
1395 | break; | 1525 | break; |
1396 | 1526 | ||
1397 | case IB_CM_REJ_RECEIVED: | 1527 | case IB_CM_REJ_RECEIVED: |
@@ -1431,8 +1561,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1431 | if (comp) | 1561 | if (comp) |
1432 | complete(&target->done); | 1562 | complete(&target->done); |
1433 | 1563 | ||
1434 | kfree(qp_attr); | ||
1435 | |||
1436 | return 0; | 1564 | return 0; |
1437 | } | 1565 | } |
1438 | 1566 | ||
@@ -1658,6 +1786,22 @@ static ssize_t show_local_ib_device(struct device *dev, | |||
1658 | return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); | 1786 | return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); |
1659 | } | 1787 | } |
1660 | 1788 | ||
1789 | static ssize_t show_cmd_sg_entries(struct device *dev, | ||
1790 | struct device_attribute *attr, char *buf) | ||
1791 | { | ||
1792 | struct srp_target_port *target = host_to_target(class_to_shost(dev)); | ||
1793 | |||
1794 | return sprintf(buf, "%u\n", target->cmd_sg_cnt); | ||
1795 | } | ||
1796 | |||
1797 | static ssize_t show_allow_ext_sg(struct device *dev, | ||
1798 | struct device_attribute *attr, char *buf) | ||
1799 | { | ||
1800 | struct srp_target_port *target = host_to_target(class_to_shost(dev)); | ||
1801 | |||
1802 | return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); | ||
1803 | } | ||
1804 | |||
1661 | static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); | 1805 | static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); |
1662 | static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); | 1806 | static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); |
1663 | static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); | 1807 | static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); |
@@ -1668,6 +1812,8 @@ static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); | |||
1668 | static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); | 1812 | static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); |
1669 | static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); | 1813 | static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); |
1670 | static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); | 1814 | static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); |
1815 | static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); | ||
1816 | static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); | ||
1671 | 1817 | ||
1672 | static struct device_attribute *srp_host_attrs[] = { | 1818 | static struct device_attribute *srp_host_attrs[] = { |
1673 | &dev_attr_id_ext, | 1819 | &dev_attr_id_ext, |
@@ -1680,6 +1826,8 @@ static struct device_attribute *srp_host_attrs[] = { | |||
1680 | &dev_attr_zero_req_lim, | 1826 | &dev_attr_zero_req_lim, |
1681 | &dev_attr_local_ib_port, | 1827 | &dev_attr_local_ib_port, |
1682 | &dev_attr_local_ib_device, | 1828 | &dev_attr_local_ib_device, |
1829 | &dev_attr_cmd_sg_entries, | ||
1830 | &dev_attr_allow_ext_sg, | ||
1683 | NULL | 1831 | NULL |
1684 | }; | 1832 | }; |
1685 | 1833 | ||
@@ -1692,6 +1840,7 @@ static struct scsi_host_template srp_template = { | |||
1692 | .eh_abort_handler = srp_abort, | 1840 | .eh_abort_handler = srp_abort, |
1693 | .eh_device_reset_handler = srp_reset_device, | 1841 | .eh_device_reset_handler = srp_reset_device, |
1694 | .eh_host_reset_handler = srp_reset_host, | 1842 | .eh_host_reset_handler = srp_reset_host, |
1843 | .sg_tablesize = SRP_DEF_SG_TABLESIZE, | ||
1695 | .can_queue = SRP_CMD_SQ_SIZE, | 1844 | .can_queue = SRP_CMD_SQ_SIZE, |
1696 | .this_id = -1, | 1845 | .this_id = -1, |
1697 | .cmd_per_lun = SRP_CMD_SQ_SIZE, | 1846 | .cmd_per_lun = SRP_CMD_SQ_SIZE, |
@@ -1763,6 +1912,9 @@ enum { | |||
1763 | SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, | 1912 | SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, |
1764 | SRP_OPT_IO_CLASS = 1 << 7, | 1913 | SRP_OPT_IO_CLASS = 1 << 7, |
1765 | SRP_OPT_INITIATOR_EXT = 1 << 8, | 1914 | SRP_OPT_INITIATOR_EXT = 1 << 8, |
1915 | SRP_OPT_CMD_SG_ENTRIES = 1 << 9, | ||
1916 | SRP_OPT_ALLOW_EXT_SG = 1 << 10, | ||
1917 | SRP_OPT_SG_TABLESIZE = 1 << 11, | ||
1766 | SRP_OPT_ALL = (SRP_OPT_ID_EXT | | 1918 | SRP_OPT_ALL = (SRP_OPT_ID_EXT | |
1767 | SRP_OPT_IOC_GUID | | 1919 | SRP_OPT_IOC_GUID | |
1768 | SRP_OPT_DGID | | 1920 | SRP_OPT_DGID | |
@@ -1780,6 +1932,9 @@ static const match_table_t srp_opt_tokens = { | |||
1780 | { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, | 1932 | { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, |
1781 | { SRP_OPT_IO_CLASS, "io_class=%x" }, | 1933 | { SRP_OPT_IO_CLASS, "io_class=%x" }, |
1782 | { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, | 1934 | { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, |
1935 | { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, | ||
1936 | { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, | ||
1937 | { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, | ||
1783 | { SRP_OPT_ERR, NULL } | 1938 | { SRP_OPT_ERR, NULL } |
1784 | }; | 1939 | }; |
1785 | 1940 | ||
@@ -1907,6 +2062,31 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) | |||
1907 | kfree(p); | 2062 | kfree(p); |
1908 | break; | 2063 | break; |
1909 | 2064 | ||
2065 | case SRP_OPT_CMD_SG_ENTRIES: | ||
2066 | if (match_int(args, &token) || token < 1 || token > 255) { | ||
2067 | printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p); | ||
2068 | goto out; | ||
2069 | } | ||
2070 | target->cmd_sg_cnt = token; | ||
2071 | break; | ||
2072 | |||
2073 | case SRP_OPT_ALLOW_EXT_SG: | ||
2074 | if (match_int(args, &token)) { | ||
2075 | printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p); | ||
2076 | goto out; | ||
2077 | } | ||
2078 | target->allow_ext_sg = !!token; | ||
2079 | break; | ||
2080 | |||
2081 | case SRP_OPT_SG_TABLESIZE: | ||
2082 | if (match_int(args, &token) || token < 1 || | ||
2083 | token > SCSI_MAX_SG_CHAIN_SEGMENTS) { | ||
2084 | printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p); | ||
2085 | goto out; | ||
2086 | } | ||
2087 | target->sg_tablesize = token; | ||
2088 | break; | ||
2089 | |||
1910 | default: | 2090 | default: |
1911 | printk(KERN_WARNING PFX "unknown parameter or missing value " | 2091 | printk(KERN_WARNING PFX "unknown parameter or missing value " |
1912 | "'%s' in target creation request\n", p); | 2092 | "'%s' in target creation request\n", p); |
@@ -1937,39 +2117,73 @@ static ssize_t srp_create_target(struct device *dev, | |||
1937 | container_of(dev, struct srp_host, dev); | 2117 | container_of(dev, struct srp_host, dev); |
1938 | struct Scsi_Host *target_host; | 2118 | struct Scsi_Host *target_host; |
1939 | struct srp_target_port *target; | 2119 | struct srp_target_port *target; |
1940 | int ret; | 2120 | struct ib_device *ibdev = host->srp_dev->dev; |
1941 | int i; | 2121 | dma_addr_t dma_addr; |
2122 | int i, ret; | ||
1942 | 2123 | ||
1943 | target_host = scsi_host_alloc(&srp_template, | 2124 | target_host = scsi_host_alloc(&srp_template, |
1944 | sizeof (struct srp_target_port)); | 2125 | sizeof (struct srp_target_port)); |
1945 | if (!target_host) | 2126 | if (!target_host) |
1946 | return -ENOMEM; | 2127 | return -ENOMEM; |
1947 | 2128 | ||
1948 | target_host->transportt = ib_srp_transport_template; | 2129 | target_host->transportt = ib_srp_transport_template; |
1949 | target_host->max_lun = SRP_MAX_LUN; | 2130 | target_host->max_lun = SRP_MAX_LUN; |
1950 | target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; | 2131 | target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; |
1951 | 2132 | ||
1952 | target = host_to_target(target_host); | 2133 | target = host_to_target(target_host); |
1953 | 2134 | ||
1954 | target->io_class = SRP_REV16A_IB_IO_CLASS; | 2135 | target->io_class = SRP_REV16A_IB_IO_CLASS; |
1955 | target->scsi_host = target_host; | 2136 | target->scsi_host = target_host; |
1956 | target->srp_host = host; | 2137 | target->srp_host = host; |
1957 | target->lkey = host->srp_dev->mr->lkey; | 2138 | target->lkey = host->srp_dev->mr->lkey; |
1958 | target->rkey = host->srp_dev->mr->rkey; | 2139 | target->rkey = host->srp_dev->mr->rkey; |
2140 | target->cmd_sg_cnt = cmd_sg_entries; | ||
2141 | target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; | ||
2142 | target->allow_ext_sg = allow_ext_sg; | ||
2143 | |||
2144 | ret = srp_parse_options(buf, target); | ||
2145 | if (ret) | ||
2146 | goto err; | ||
2147 | |||
2148 | if (!host->srp_dev->fmr_pool && !target->allow_ext_sg && | ||
2149 | target->cmd_sg_cnt < target->sg_tablesize) { | ||
2150 | printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); | ||
2151 | target->sg_tablesize = target->cmd_sg_cnt; | ||
2152 | } | ||
2153 | |||
2154 | target_host->sg_tablesize = target->sg_tablesize; | ||
2155 | target->indirect_size = target->sg_tablesize * | ||
2156 | sizeof (struct srp_direct_buf); | ||
2157 | target->max_iu_len = sizeof (struct srp_cmd) + | ||
2158 | sizeof (struct srp_indirect_buf) + | ||
2159 | target->cmd_sg_cnt * sizeof (struct srp_direct_buf); | ||
1959 | 2160 | ||
1960 | spin_lock_init(&target->lock); | 2161 | spin_lock_init(&target->lock); |
1961 | INIT_LIST_HEAD(&target->free_tx); | 2162 | INIT_LIST_HEAD(&target->free_tx); |
1962 | INIT_LIST_HEAD(&target->free_reqs); | 2163 | INIT_LIST_HEAD(&target->free_reqs); |
1963 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { | 2164 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
1964 | target->req_ring[i].index = i; | 2165 | struct srp_request *req = &target->req_ring[i]; |
1965 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | ||
1966 | } | ||
1967 | 2166 | ||
1968 | ret = srp_parse_options(buf, target); | 2167 | req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *), |
1969 | if (ret) | 2168 | GFP_KERNEL); |
1970 | goto err; | 2169 | req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *), |
2170 | GFP_KERNEL); | ||
2171 | req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); | ||
2172 | if (!req->fmr_list || !req->map_page || !req->indirect_desc) | ||
2173 | goto err_free_mem; | ||
2174 | |||
2175 | dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, | ||
2176 | target->indirect_size, | ||
2177 | DMA_TO_DEVICE); | ||
2178 | if (ib_dma_mapping_error(ibdev, dma_addr)) | ||
2179 | goto err_free_mem; | ||
2180 | |||
2181 | req->indirect_dma_addr = dma_addr; | ||
2182 | req->index = i; | ||
2183 | list_add_tail(&req->list, &target->free_reqs); | ||
2184 | } | ||
1971 | 2185 | ||
1972 | ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid); | 2186 | ib_query_gid(ibdev, host->port, 0, &target->path.sgid); |
1973 | 2187 | ||
1974 | shost_printk(KERN_DEBUG, target->scsi_host, PFX | 2188 | shost_printk(KERN_DEBUG, target->scsi_host, PFX |
1975 | "new target: id_ext %016llx ioc_guid %016llx pkey %04x " | 2189 | "new target: id_ext %016llx ioc_guid %016llx pkey %04x " |
@@ -1982,11 +2196,11 @@ static ssize_t srp_create_target(struct device *dev, | |||
1982 | 2196 | ||
1983 | ret = srp_create_target_ib(target); | 2197 | ret = srp_create_target_ib(target); |
1984 | if (ret) | 2198 | if (ret) |
1985 | goto err; | 2199 | goto err_free_mem; |
1986 | 2200 | ||
1987 | ret = srp_new_cm_id(target); | 2201 | ret = srp_new_cm_id(target); |
1988 | if (ret) | 2202 | if (ret) |
1989 | goto err_free; | 2203 | goto err_free_ib; |
1990 | 2204 | ||
1991 | target->qp_in_error = 0; | 2205 | target->qp_in_error = 0; |
1992 | ret = srp_connect_target(target); | 2206 | ret = srp_connect_target(target); |
@@ -2008,9 +2222,12 @@ err_disconnect: | |||
2008 | err_cm_id: | 2222 | err_cm_id: |
2009 | ib_destroy_cm_id(target->cm_id); | 2223 | ib_destroy_cm_id(target->cm_id); |
2010 | 2224 | ||
2011 | err_free: | 2225 | err_free_ib: |
2012 | srp_free_target_ib(target); | 2226 | srp_free_target_ib(target); |
2013 | 2227 | ||
2228 | err_free_mem: | ||
2229 | srp_free_req_data(target); | ||
2230 | |||
2014 | err: | 2231 | err: |
2015 | scsi_host_put(target_host); | 2232 | scsi_host_put(target_host); |
2016 | 2233 | ||
@@ -2083,7 +2300,7 @@ static void srp_add_one(struct ib_device *device) | |||
2083 | struct ib_device_attr *dev_attr; | 2300 | struct ib_device_attr *dev_attr; |
2084 | struct ib_fmr_pool_param fmr_param; | 2301 | struct ib_fmr_pool_param fmr_param; |
2085 | struct srp_host *host; | 2302 | struct srp_host *host; |
2086 | int s, e, p; | 2303 | int max_pages_per_fmr, fmr_page_shift, s, e, p; |
2087 | 2304 | ||
2088 | dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); | 2305 | dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); |
2089 | if (!dev_attr) | 2306 | if (!dev_attr) |
@@ -2101,12 +2318,13 @@ static void srp_add_one(struct ib_device *device) | |||
2101 | 2318 | ||
2102 | /* | 2319 | /* |
2103 | * Use the smallest page size supported by the HCA, down to a | 2320 | * Use the smallest page size supported by the HCA, down to a |
2104 | * minimum of 512 bytes (which is the smallest sector that a | 2321 | * minimum of 4096 bytes. We're unlikely to build large sglists |
2105 | * SCSI command will ever carry). | 2322 | * out of smaller entries. |
2106 | */ | 2323 | */ |
2107 | srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1); | 2324 | fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1); |
2108 | srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift; | 2325 | srp_dev->fmr_page_size = 1 << fmr_page_shift; |
2109 | srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1); | 2326 | srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1); |
2327 | srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE; | ||
2110 | 2328 | ||
2111 | INIT_LIST_HEAD(&srp_dev->dev_list); | 2329 | INIT_LIST_HEAD(&srp_dev->dev_list); |
2112 | 2330 | ||
@@ -2122,17 +2340,24 @@ static void srp_add_one(struct ib_device *device) | |||
2122 | if (IS_ERR(srp_dev->mr)) | 2340 | if (IS_ERR(srp_dev->mr)) |
2123 | goto err_pd; | 2341 | goto err_pd; |
2124 | 2342 | ||
2125 | memset(&fmr_param, 0, sizeof fmr_param); | 2343 | for (max_pages_per_fmr = SRP_FMR_SIZE; |
2126 | fmr_param.pool_size = SRP_FMR_POOL_SIZE; | 2344 | max_pages_per_fmr >= SRP_FMR_MIN_SIZE; |
2127 | fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE; | 2345 | max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) { |
2128 | fmr_param.cache = 1; | 2346 | memset(&fmr_param, 0, sizeof fmr_param); |
2129 | fmr_param.max_pages_per_fmr = SRP_FMR_SIZE; | 2347 | fmr_param.pool_size = SRP_FMR_POOL_SIZE; |
2130 | fmr_param.page_shift = srp_dev->fmr_page_shift; | 2348 | fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE; |
2131 | fmr_param.access = (IB_ACCESS_LOCAL_WRITE | | 2349 | fmr_param.cache = 1; |
2132 | IB_ACCESS_REMOTE_WRITE | | 2350 | fmr_param.max_pages_per_fmr = max_pages_per_fmr; |
2133 | IB_ACCESS_REMOTE_READ); | 2351 | fmr_param.page_shift = fmr_page_shift; |
2134 | 2352 | fmr_param.access = (IB_ACCESS_LOCAL_WRITE | | |
2135 | srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param); | 2353 | IB_ACCESS_REMOTE_WRITE | |
2354 | IB_ACCESS_REMOTE_READ); | ||
2355 | |||
2356 | srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param); | ||
2357 | if (!IS_ERR(srp_dev->fmr_pool)) | ||
2358 | break; | ||
2359 | } | ||
2360 | |||
2136 | if (IS_ERR(srp_dev->fmr_pool)) | 2361 | if (IS_ERR(srp_dev->fmr_pool)) |
2137 | srp_dev->fmr_pool = NULL; | 2362 | srp_dev->fmr_pool = NULL; |
2138 | 2363 | ||
@@ -2207,6 +2432,7 @@ static void srp_remove_one(struct ib_device *device) | |||
2207 | srp_disconnect_target(target); | 2432 | srp_disconnect_target(target); |
2208 | ib_destroy_cm_id(target->cm_id); | 2433 | ib_destroy_cm_id(target->cm_id); |
2209 | srp_free_target_ib(target); | 2434 | srp_free_target_ib(target); |
2435 | srp_free_req_data(target); | ||
2210 | scsi_host_put(target->scsi_host); | 2436 | scsi_host_put(target->scsi_host); |
2211 | } | 2437 | } |
2212 | 2438 | ||
@@ -2230,9 +2456,25 @@ static int __init srp_init_module(void) | |||
2230 | 2456 | ||
2231 | BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); | 2457 | BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); |
2232 | 2458 | ||
2233 | if (srp_sg_tablesize > 255) { | 2459 | if (srp_sg_tablesize) { |
2234 | printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); | 2460 | printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n"); |
2235 | srp_sg_tablesize = 255; | 2461 | if (!cmd_sg_entries) |
2462 | cmd_sg_entries = srp_sg_tablesize; | ||
2463 | } | ||
2464 | |||
2465 | if (!cmd_sg_entries) | ||
2466 | cmd_sg_entries = SRP_DEF_SG_TABLESIZE; | ||
2467 | |||
2468 | if (cmd_sg_entries > 255) { | ||
2469 | printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n"); | ||
2470 | cmd_sg_entries = 255; | ||
2471 | } | ||
2472 | |||
2473 | if (!indirect_sg_entries) | ||
2474 | indirect_sg_entries = cmd_sg_entries; | ||
2475 | else if (indirect_sg_entries < cmd_sg_entries) { | ||
2476 | printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries); | ||
2477 | indirect_sg_entries = cmd_sg_entries; | ||
2236 | } | 2478 | } |
2237 | 2479 | ||
2238 | ib_srp_transport_template = | 2480 | ib_srp_transport_template = |
@@ -2240,11 +2482,6 @@ static int __init srp_init_module(void) | |||
2240 | if (!ib_srp_transport_template) | 2482 | if (!ib_srp_transport_template) |
2241 | return -ENOMEM; | 2483 | return -ENOMEM; |
2242 | 2484 | ||
2243 | srp_template.sg_tablesize = srp_sg_tablesize; | ||
2244 | srp_max_iu_len = (sizeof (struct srp_cmd) + | ||
2245 | sizeof (struct srp_indirect_buf) + | ||
2246 | srp_sg_tablesize * 16); | ||
2247 | |||
2248 | ret = class_register(&srp_class); | 2485 | ret = class_register(&srp_class); |
2249 | if (ret) { | 2486 | if (ret) { |
2250 | printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); | 2487 | printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 9dc6fc3fd894..020caf0c3789 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -69,9 +69,13 @@ enum { | |||
69 | SRP_TAG_NO_REQ = ~0U, | 69 | SRP_TAG_NO_REQ = ~0U, |
70 | SRP_TAG_TSK_MGMT = 1U << 31, | 70 | SRP_TAG_TSK_MGMT = 1U << 31, |
71 | 71 | ||
72 | SRP_FMR_SIZE = 256, | 72 | SRP_FMR_SIZE = 512, |
73 | SRP_FMR_MIN_SIZE = 128, | ||
73 | SRP_FMR_POOL_SIZE = 1024, | 74 | SRP_FMR_POOL_SIZE = 1024, |
74 | SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4 | 75 | SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4, |
76 | |||
77 | SRP_MAP_ALLOW_FMR = 0, | ||
78 | SRP_MAP_NO_FMR = 1, | ||
75 | }; | 79 | }; |
76 | 80 | ||
77 | enum srp_target_state { | 81 | enum srp_target_state { |
@@ -93,9 +97,9 @@ struct srp_device { | |||
93 | struct ib_pd *pd; | 97 | struct ib_pd *pd; |
94 | struct ib_mr *mr; | 98 | struct ib_mr *mr; |
95 | struct ib_fmr_pool *fmr_pool; | 99 | struct ib_fmr_pool *fmr_pool; |
96 | int fmr_page_shift; | ||
97 | int fmr_page_size; | ||
98 | u64 fmr_page_mask; | 100 | u64 fmr_page_mask; |
101 | int fmr_page_size; | ||
102 | int fmr_max_size; | ||
99 | }; | 103 | }; |
100 | 104 | ||
101 | struct srp_host { | 105 | struct srp_host { |
@@ -112,7 +116,11 @@ struct srp_request { | |||
112 | struct list_head list; | 116 | struct list_head list; |
113 | struct scsi_cmnd *scmnd; | 117 | struct scsi_cmnd *scmnd; |
114 | struct srp_iu *cmd; | 118 | struct srp_iu *cmd; |
115 | struct ib_pool_fmr *fmr; | 119 | struct ib_pool_fmr **fmr_list; |
120 | u64 *map_page; | ||
121 | struct srp_direct_buf *indirect_desc; | ||
122 | dma_addr_t indirect_dma_addr; | ||
123 | short nfmr; | ||
116 | short index; | 124 | short index; |
117 | }; | 125 | }; |
118 | 126 | ||
@@ -130,6 +138,10 @@ struct srp_target_port { | |||
130 | u32 lkey; | 138 | u32 lkey; |
131 | u32 rkey; | 139 | u32 rkey; |
132 | enum srp_target_state state; | 140 | enum srp_target_state state; |
141 | unsigned int max_iu_len; | ||
142 | unsigned int cmd_sg_cnt; | ||
143 | unsigned int indirect_size; | ||
144 | bool allow_ext_sg; | ||
133 | 145 | ||
134 | /* Everything above this point is used in the hot path of | 146 | /* Everything above this point is used in the hot path of |
135 | * command processing. Try to keep them packed into cachelines. | 147 | * command processing. Try to keep them packed into cachelines. |
@@ -144,6 +156,7 @@ struct srp_target_port { | |||
144 | struct Scsi_Host *scsi_host; | 156 | struct Scsi_Host *scsi_host; |
145 | char target_name[32]; | 157 | char target_name[32]; |
146 | unsigned int scsi_id; | 158 | unsigned int scsi_id; |
159 | unsigned int sg_tablesize; | ||
147 | 160 | ||
148 | struct ib_sa_path_rec path; | 161 | struct ib_sa_path_rec path; |
149 | __be16 orig_dgid[8]; | 162 | __be16 orig_dgid[8]; |
@@ -179,4 +192,19 @@ struct srp_iu { | |||
179 | enum dma_data_direction direction; | 192 | enum dma_data_direction direction; |
180 | }; | 193 | }; |
181 | 194 | ||
195 | struct srp_map_state { | ||
196 | struct ib_pool_fmr **next_fmr; | ||
197 | struct srp_direct_buf *desc; | ||
198 | u64 *pages; | ||
199 | dma_addr_t base_dma_addr; | ||
200 | u32 fmr_len; | ||
201 | u32 total_len; | ||
202 | unsigned int npages; | ||
203 | unsigned int nfmr; | ||
204 | unsigned int ndesc; | ||
205 | struct scatterlist *unmapped_sg; | ||
206 | int unmapped_index; | ||
207 | dma_addr_t unmapped_addr; | ||
208 | }; | ||
209 | |||
182 | #endif /* IB_SRP_H */ | 210 | #endif /* IB_SRP_H */ |