author     David Dillow <dillowda@ornl.gov>  2011-01-14 19:45:50 -0500
committer  David Dillow <dillowda@ornl.gov>  2011-03-15 19:35:16 -0400
commit     8f26c9ff9cd0317ad867bce972f69e0c6c2cbe3c (patch)
tree       e130e878b4e13cbac4892011cc18aa8fef5257fb /drivers/infiniband
parent     4924864404d0ce2c32a6d20b27b5b6fcb31e481d (diff)
IB/srp: rework mapping engine to use multiple FMR entries
Instead of forcing all of the S/G entries to fit in one FMR, and falling back to indirect descriptors if that fails, allow the use of as many FMRs as needed to map the request. This lays the groundwork for allowing indirect descriptor tables that are larger than can fit in the command IU, but should marginally improve performance now by reducing the number of indirect descriptors needed.

We increase the minimum page size for the FMR pool to 4K, as larger pages help increase the coverage of each FMR, and it is rare that the kernel would send down a request with scattered 512 byte fragments.

This patch also moves some of the target initialization code after the parsing of options, to keep it together with the new code that needs to allocate memory based on the options given.

Signed-off-by: David Dillow <dillowda@ornl.gov>
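For readers skimming the change, the sketch below illustrates the mapping idea in isolation: gather page-aligned chunks of each scatter/gather entry into a page list, close that list out as one FMR whenever it fills up or an entry breaks page alignment, and emit one descriptor per FMR (or a direct descriptor for an odd fragment). This is a simplified, self-contained user-space illustration, not the driver code; the names map_state, flush_fmr and PAGES_PER_FMR are invented for the example, while the real helpers (srp_map_sg_entry(), srp_map_finish_fmr(), srp_map_desc()) appear in the diff below.

/* Toy model of the multi-FMR mapping state machine; not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define PAGES_PER_FMR	256		/* mirrors SRP_FMR_SIZE */

struct map_state {
	uint64_t pages[PAGES_PER_FMR];	/* page list for the FMR being built */
	unsigned int npages;
	unsigned int ndesc;		/* descriptors emitted so far */
};

/* Close out the current page list as one FMR and emit a descriptor for it. */
static void flush_fmr(struct map_state *s)
{
	if (!s->npages)
		return;
	printf("desc %u: FMR of %u pages starting at 0x%llx\n",
	       s->ndesc++, s->npages, (unsigned long long)s->pages[0]);
	s->npages = 0;
}

/* Map one DMA-contiguous scatterlist entry [addr, addr + len). */
static void map_entry(struct map_state *s, uint64_t addr, uint64_t len)
{
	if (addr & (PAGE_SIZE - 1)) {
		/* Entry starts mid-page: close the FMR, describe it directly. */
		flush_fmr(s);
		printf("desc %u: direct 0x%llx len %llu\n", s->ndesc++,
		       (unsigned long long)addr, (unsigned long long)len);
		return;
	}
	while (len) {
		uint64_t chunk = len < PAGE_SIZE ? len : PAGE_SIZE;

		if (s->npages == PAGES_PER_FMR)
			flush_fmr(s);		/* FMR full, start another */
		s->pages[s->npages++] = addr;
		addr += chunk;
		len -= chunk;
	}
	/* Merging is only possible on page boundaries, so close the FMR
	 * if this entry ended partway into a page. */
	if (addr & (PAGE_SIZE - 1))
		flush_fmr(s);
}

int main(void)
{
	struct map_state s = { .npages = 0, .ndesc = 0 };

	map_entry(&s, 0x10000, 3 * PAGE_SIZE);		/* merged into one FMR */
	map_entry(&s, 0x20200, 512);			/* unaligned fragment */
	map_entry(&s, 0x30000, 2 * PAGE_SIZE + 100);	/* ends mid-page */
	flush_fmr(&s);					/* close whatever is left */
	return 0;
}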
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  367
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h   28
2 files changed, 266 insertions, 129 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 6f8ee0c7ef5f..9ce129ab3beb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -444,6 +444,17 @@ static bool srp_change_state(struct srp_target_port *target,
 	return changed;
 }
 
+static void srp_free_req_data(struct srp_target_port *target)
+{
+	struct srp_request *req;
+	int i;
+
+	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
+		kfree(req->fmr_list);
+		kfree(req->map_page);
+	}
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
@@ -460,6 +471,7 @@ static void srp_remove_work(struct work_struct *work)
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
+	srp_free_req_data(target);
 	scsi_host_put(target->scsi_host);
 }
 
@@ -523,18 +535,20 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			struct srp_target_port *target,
 			struct srp_request *req)
 {
+	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
+	struct ib_pool_fmr **pfmr;
+
 	if (!scsi_sglist(scmnd) ||
 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 		return;
 
-	if (req->fmr) {
-		ib_fmr_pool_unmap(req->fmr);
-		req->fmr = NULL;
-	}
+	pfmr = req->fmr_list;
+	while (req->nfmr--)
+		ib_fmr_pool_unmap(*pfmr++);
 
-	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
-			scsi_sg_count(scmnd), scmnd->sc_data_direction);
+	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
+			scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target,
@@ -633,95 +647,152 @@ err:
 	return ret;
 }
 
-static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
-		       int sg_cnt, struct srp_request *req,
-		       struct srp_direct_buf *buf)
+static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
+			 unsigned int dma_len, u32 rkey)
 {
-	u64 io_addr = 0;
-	u64 *dma_pages;
-	u32 len;
-	int page_cnt;
-	int i, j;
-	int ret;
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_device *ibdev = dev->dev;
-	struct scatterlist *sg;
+	struct srp_direct_buf *desc = state->desc;
 
-	if (!dev->fmr_pool)
-		return -ENODEV;
+	desc->va = cpu_to_be64(dma_addr);
+	desc->key = cpu_to_be32(rkey);
+	desc->len = cpu_to_be32(dma_len);
 
-	if (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask)
-		return -EINVAL;
+	state->total_len += dma_len;
+	state->desc++;
+	state->ndesc++;
+}
 
-	len = page_cnt = 0;
-	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+static int srp_map_finish_fmr(struct srp_map_state *state,
+			      struct srp_target_port *target)
+{
+	struct srp_device *dev = target->srp_host->srp_dev;
+	struct ib_pool_fmr *fmr;
+	u64 io_addr = 0;
 
-		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
-			if (i > 0)
-				return -EINVAL;
-			else
-				++page_cnt;
-		}
-		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
-		    ~dev->fmr_page_mask) {
-			if (i < sg_cnt - 1)
-				return -EINVAL;
-			else
-				++page_cnt;
-		}
+	if (!state->npages)
+		return 0;
 
-		len += dma_len;
+	if (state->npages == 1) {
+		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
+			     target->rkey);
+		state->npages = state->fmr_len = 0;
+		return 0;
 	}
 
-	page_cnt += len >> dev->fmr_page_shift;
-	if (page_cnt > SRP_FMR_SIZE)
-		return -ENOMEM;
+	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
+				   state->npages, io_addr);
+	if (IS_ERR(fmr))
+		return PTR_ERR(fmr);
 
-	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
-	if (!dma_pages)
-		return -ENOMEM;
+	*state->next_fmr++ = fmr;
+	state->nfmr++;
 
-	page_cnt = 0;
-	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
+	state->npages = state->fmr_len = 0;
+	return 0;
+}
+
+static void srp_map_update_start(struct srp_map_state *state,
+				 struct scatterlist *sg, int sg_index,
+				 dma_addr_t dma_addr)
+{
+	state->unmapped_sg = sg;
+	state->unmapped_index = sg_index;
+	state->unmapped_addr = dma_addr;
+}
 
-		for (j = 0; j < dma_len; j += dev->fmr_page_size)
-			dma_pages[page_cnt++] =
-				(ib_sg_dma_address(ibdev, sg) &
-				 dev->fmr_page_mask) + j;
+static int srp_map_sg_entry(struct srp_map_state *state,
+			    struct srp_target_port *target,
+			    struct scatterlist *sg, int sg_index,
+			    int use_fmr)
+{
+	struct srp_device *dev = target->srp_host->srp_dev;
+	struct ib_device *ibdev = dev->dev;
+	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
+	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+	unsigned int len;
+	int ret;
+
+	if (!dma_len)
+		return 0;
+
+	if (use_fmr == SRP_MAP_NO_FMR) {
+		/* Once we're in direct map mode for a request, we don't
+		 * go back to FMR mode, so no need to update anything
+		 * other than the descriptor.
+		 */
+		srp_map_desc(state, dma_addr, dma_len, target->rkey);
+		return 0;
 	}
 
-	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
-					dma_pages, page_cnt, io_addr);
-	if (IS_ERR(req->fmr)) {
-		ret = PTR_ERR(req->fmr);
-		req->fmr = NULL;
-		goto out;
+	/* If we start at an offset into the FMR page, don't merge into
+	 * the current FMR. Finish it out, and use the kernel's MR for this
+	 * sg entry. This is to avoid potential bugs on some SRP targets
+	 * that were never quite defined, but went away when the initiator
+	 * avoided using FMR on such page fragments.
+	 */
+	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
+		ret = srp_map_finish_fmr(state, target);
+		if (ret)
+			return ret;
+
+		srp_map_desc(state, dma_addr, dma_len, target->rkey);
+		srp_map_update_start(state, NULL, 0, 0);
+		return 0;
 	}
 
-	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
-			       ~dev->fmr_page_mask);
-	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
-	buf->len = cpu_to_be32(len);
+	/* If this is the first sg to go into the FMR, save our position.
+	 * We need to know the first unmapped entry, its index, and the
+	 * first unmapped address within that entry to be able to restart
+	 * mapping after an error.
+	 */
+	if (!state->unmapped_sg)
+		srp_map_update_start(state, sg, sg_index, dma_addr);
 
-	ret = 0;
+	while (dma_len) {
+		if (state->npages == SRP_FMR_SIZE) {
+			ret = srp_map_finish_fmr(state, target);
+			if (ret)
+				return ret;
 
-out:
-	kfree(dma_pages);
+			srp_map_update_start(state, sg, sg_index, dma_addr);
+		}
+
+		len = min_t(unsigned int, dma_len, dev->fmr_page_size);
 
+		if (!state->npages)
+			state->base_dma_addr = dma_addr;
+		state->pages[state->npages++] = dma_addr;
+		state->fmr_len += len;
+		dma_addr += len;
+		dma_len -= len;
+	}
+
+	/* If the last entry of the FMR wasn't a full page, then we need to
+	 * close it out and start a new one -- we can only merge at page
+	 * boundries.
+	 */
+	ret = 0;
+	if (len != dev->fmr_page_size) {
+		ret = srp_map_finish_fmr(state, target);
+		if (!ret)
+			srp_map_update_start(state, NULL, 0, 0);
+	}
 	return ret;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			struct srp_request *req)
 {
-	struct scatterlist *scat;
+	struct scatterlist *scat, *sg;
 	struct srp_cmd *cmd = req->cmd->buf;
-	int len, nents, count;
-	u8 fmt = SRP_DATA_DESC_DIRECT;
+	int i, len, nents, count, use_fmr;
 	struct srp_device *dev;
 	struct ib_device *ibdev;
+	struct srp_map_state state;
+	struct srp_indirect_buf *indirect_hdr;
+	dma_addr_t indirect_addr;
+	u32 table_len;
+	u8 fmt;
 
 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
@@ -741,6 +812,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	ibdev = dev->dev;
 
 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
+	if (unlikely(count == 0))
+		return -EIO;
 
 	fmt = SRP_DATA_DESC_DIRECT;
 	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -757,49 +830,80 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
 		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
-	} else if (srp_map_fmr(target, scat, count, req,
-			       (void *) cmd->add_data)) {
-		/*
-		 * FMR mapping failed, and the scatterlist has more
-		 * than one entry. Generate an indirect memory
-		 * descriptor.
-		 */
-		struct srp_indirect_buf *buf = (void *) cmd->add_data;
-		struct scatterlist *sg;
-		u32 datalen = 0;
-		int i;
-
-		fmt = SRP_DATA_DESC_INDIRECT;
-		len = sizeof (struct srp_cmd) +
-			sizeof (struct srp_indirect_buf) +
-			count * sizeof (struct srp_direct_buf);
-
-		scsi_for_each_sg(scmnd, sg, count, i) {
-			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
-			buf->desc_list[i].va =
-				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
-			buf->desc_list[i].key =
-				cpu_to_be32(target->rkey);
-			buf->desc_list[i].len = cpu_to_be32(dma_len);
-			datalen += dma_len;
+
+		req->nfmr = 0;
+		goto map_complete;
+	}
+
+	/* We have more than one scatter/gather entry, so build our indirect
+	 * descriptor table, trying to merge as many entries with FMR as we
+	 * can.
+	 */
+	indirect_hdr = (void *) cmd->add_data;
+
+	memset(&state, 0, sizeof(state));
+	state.desc = indirect_hdr->desc_list;
+	state.pages = req->map_page;
+	state.next_fmr = req->fmr_list;
+
+	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
+
+	for_each_sg(scat, sg, count, i) {
+		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
+			/* FMR mapping failed, so backtrack to the first
+			 * unmapped entry and continue on without using FMR.
+			 */
+			dma_addr_t dma_addr;
+			unsigned int dma_len;
+
+backtrack:
+			sg = state.unmapped_sg;
+			i = state.unmapped_index;
+
+			dma_addr = ib_sg_dma_address(ibdev, sg);
+			dma_len = ib_sg_dma_len(ibdev, sg);
+			dma_len -= (state.unmapped_addr - dma_addr);
+			dma_addr = state.unmapped_addr;
+			use_fmr = SRP_MAP_NO_FMR;
+			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
 		}
+	}
 
-		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
-			cmd->data_out_desc_cnt = count;
-		else
-			cmd->data_in_desc_cnt = count;
+	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
+		goto backtrack;
 
-		buf->table_desc.va =
-			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
-		buf->table_desc.key =
-			cpu_to_be32(target->rkey);
-		buf->table_desc.len =
-			cpu_to_be32(count * sizeof (struct srp_direct_buf));
+	/* We've mapped the request, fill in the command buffer.
+	 */
+	req->nfmr = state.nfmr;
+	if (state.ndesc == 1) {
+		/* FMR mapping was able to collapse this to one entry,
+		 * so use a direct descriptor.
+		 */
+		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
-		buf->len = cpu_to_be32(datalen);
+		*buf = indirect_hdr->desc_list[0];
+		goto map_complete;
 	}
 
+	table_len = state.ndesc * sizeof (struct srp_direct_buf);
+
+	fmt = SRP_DATA_DESC_INDIRECT;
+	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
+	len += table_len;
+
+	indirect_addr = req->cmd->dma + sizeof *cmd + sizeof *indirect_hdr;
+
+	indirect_hdr->table_desc.va = cpu_to_be64(indirect_addr);
+	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
+	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
+	indirect_hdr->len = cpu_to_be32(state.total_len);
+
+	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+		cmd->data_out_desc_cnt = state.ndesc;
+	else
+		cmd->data_in_desc_cnt = state.ndesc;
+
+map_complete:
 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 		cmd->buf_fmt = fmt << 4;
 	else
@@ -1947,8 +2051,7 @@ static ssize_t srp_create_target(struct device *dev,
 		container_of(dev, struct srp_host, dev);
 	struct Scsi_Host *target_host;
 	struct srp_target_port *target;
-	int ret;
-	int i;
+	int i, ret;
 
 	target_host = scsi_host_alloc(&srp_template,
 				      sizeof (struct srp_target_port));
@@ -1968,14 +2071,6 @@ static ssize_t srp_create_target(struct device *dev,
 	target->rkey = host->srp_dev->mr->rkey;
 	target->cmd_sg_cnt = cmd_sg_entries;
 
-	spin_lock_init(&target->lock);
-	INIT_LIST_HEAD(&target->free_tx);
-	INIT_LIST_HEAD(&target->free_reqs);
-	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
-		target->req_ring[i].index = i;
-		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
-	}
-
 	ret = srp_parse_options(buf, target);
 	if (ret)
 		goto err;
@@ -1985,6 +2080,23 @@ static ssize_t srp_create_target(struct device *dev,
 		sizeof (struct srp_indirect_buf) +
 		target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
+	INIT_LIST_HEAD(&target->free_reqs);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+
+		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
+					GFP_KERNEL);
+		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
+					GFP_KERNEL);
+		if (!req->fmr_list || !req->map_page)
+			goto err_free_mem;
+
+		req->index = i;
+		list_add_tail(&req->list, &target->free_reqs);
+	}
+
 	ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);
 
 	shost_printk(KERN_DEBUG, target->scsi_host, PFX
@@ -1998,11 +2110,11 @@ static ssize_t srp_create_target(struct device *dev,
 
 	ret = srp_create_target_ib(target);
 	if (ret)
-		goto err;
+		goto err_free_mem;
 
 	ret = srp_new_cm_id(target);
 	if (ret)
-		goto err_free;
+		goto err_free_ib;
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
@@ -2024,9 +2136,12 @@ err_disconnect:
 err_cm_id:
 	ib_destroy_cm_id(target->cm_id);
 
-err_free:
+err_free_ib:
 	srp_free_target_ib(target);
 
+err_free_mem:
+	srp_free_req_data(target);
+
 err:
 	scsi_host_put(target_host);
 
@@ -2099,7 +2214,7 @@ static void srp_add_one(struct ib_device *device)
 	struct ib_device_attr *dev_attr;
 	struct ib_fmr_pool_param fmr_param;
 	struct srp_host *host;
-	int s, e, p;
+	int fmr_page_shift, s, e, p;
 
 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
 	if (!dev_attr)
@@ -2117,12 +2232,13 @@ static void srp_add_one(struct ib_device *device)
 
 	/*
 	 * Use the smallest page size supported by the HCA, down to a
-	 * minimum of 512 bytes (which is the smallest sector that a
-	 * SCSI command will ever carry).
+	 * minimum of 4096 bytes. We're unlikely to build large sglists
+	 * out of smaller entries.
 	 */
-	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
-	srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift;
+	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
+	srp_dev->fmr_page_size = 1 << fmr_page_shift;
 	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
+	srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
 
 	INIT_LIST_HEAD(&srp_dev->dev_list);
 
@@ -2143,7 +2259,7 @@ static void srp_add_one(struct ib_device *device)
 	fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
 	fmr_param.cache = 1;
 	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
-	fmr_param.page_shift = srp_dev->fmr_page_shift;
+	fmr_param.page_shift = fmr_page_shift;
 	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
 			    IB_ACCESS_REMOTE_WRITE |
 			    IB_ACCESS_REMOTE_READ);
@@ -2223,6 +2339,7 @@ static void srp_remove_one(struct ib_device *device)
 		srp_disconnect_target(target);
 		ib_destroy_cm_id(target->cm_id);
 		srp_free_target_ib(target);
+		srp_free_req_data(target);
 		scsi_host_put(target->scsi_host);
 	}
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index db39dbf76216..b43b5e7acbde 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -71,7 +71,10 @@ enum {
 
 	SRP_FMR_SIZE = 256,
 	SRP_FMR_POOL_SIZE = 1024,
-	SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
+	SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4,
+
+	SRP_MAP_ALLOW_FMR = 0,
+	SRP_MAP_NO_FMR = 1,
 };
 
 enum srp_target_state {
@@ -93,9 +96,9 @@ struct srp_device {
 	struct ib_pd *pd;
 	struct ib_mr *mr;
 	struct ib_fmr_pool *fmr_pool;
-	int fmr_page_shift;
-	int fmr_page_size;
 	u64 fmr_page_mask;
+	int fmr_page_size;
+	int fmr_max_size;
 };
 
 struct srp_host {
@@ -112,7 +115,9 @@ struct srp_request {
 	struct list_head list;
 	struct scsi_cmnd *scmnd;
 	struct srp_iu *cmd;
-	struct ib_pool_fmr *fmr;
+	struct ib_pool_fmr **fmr_list;
+	u64 *map_page;
+	short nfmr;
 	short index;
 };
 
@@ -181,4 +186,19 @@ struct srp_iu {
 	enum dma_data_direction direction;
 };
 
+struct srp_map_state {
+	struct ib_pool_fmr **next_fmr;
+	struct srp_direct_buf *desc;
+	u64 *pages;
+	dma_addr_t base_dma_addr;
+	u32 fmr_len;
+	u32 total_len;
+	unsigned int npages;
+	unsigned int nfmr;
+	unsigned int ndesc;
+	struct scatterlist *unmapped_sg;
+	int unmapped_index;
+	dma_addr_t unmapped_addr;
+};
+
 #endif /* IB_SRP_H */