diff options
author | Chuck Lever <chuck.lever@oracle.com> | 2017-08-28 15:06:14 -0400 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2017-09-05 15:15:30 -0400 |
commit | 0062818298662d0d05061949d12880146b5ebd65 (patch) | |
tree | 50f9a9d6b223eba747b65857157a87bdb1356c2c | |
parent | 5a25bfd28c321e522dbe8083ad6219fa2a820610 (diff) |
rdma core: Add rdma_rw_mr_factor()
The amount of payload per MR depends on device capabilities and
the memory registration mode in use. The new rdma_rw API hides both,
making it difficult for ULPs to determine how large their transport
send queues need to be.
Expose the MR payload information via a new API.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r-- | drivers/infiniband/core/rw.c | 24 | ||||
-rw-r--r-- | include/rdma/rw.h | 2 |
2 files changed, 26 insertions, 0 deletions
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index dbfd854c32c9..6ca607e8e293 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c | |||
@@ -643,6 +643,30 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, | |||
643 | } | 643 | } |
644 | EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); | 644 | EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); |
645 | 645 | ||
646 | /** | ||
647 | * rdma_rw_mr_factor - return number of MRs required for a payload | ||
648 | * @device: device handling the connection | ||
649 | * @port_num: port num to which the connection is bound | ||
650 | * @maxpages: maximum payload pages per rdma_rw_ctx | ||
651 | * | ||
652 | * Returns the number of MRs the device requires to move @maxpayload | ||
653 | * bytes. The returned value is used during transport creation to | ||
654 | * compute max_rdma_ctxts and the size of the transport's Send and | ||
655 | * Send Completion Queues. | ||
656 | */ | ||
657 | unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, | ||
658 | unsigned int maxpages) | ||
659 | { | ||
660 | unsigned int mr_pages; | ||
661 | |||
662 | if (rdma_rw_can_use_mr(device, port_num)) | ||
663 | mr_pages = rdma_rw_fr_page_list_len(device); | ||
664 | else | ||
665 | mr_pages = device->attrs.max_sge_rd; | ||
666 | return DIV_ROUND_UP(maxpages, mr_pages); | ||
667 | } | ||
668 | EXPORT_SYMBOL(rdma_rw_mr_factor); | ||
669 | |||
646 | void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr) | 670 | void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr) |
647 | { | 671 | { |
648 | u32 factor; | 672 | u32 factor; |
diff --git a/include/rdma/rw.h b/include/rdma/rw.h index 377d865e506d..a3cbbc7b6417 100644 --- a/include/rdma/rw.h +++ b/include/rdma/rw.h | |||
@@ -81,6 +81,8 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, | |||
81 | int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, | 81 | int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, |
82 | struct ib_cqe *cqe, struct ib_send_wr *chain_wr); | 82 | struct ib_cqe *cqe, struct ib_send_wr *chain_wr); |
83 | 83 | ||
84 | unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, | ||
85 | unsigned int maxpages); | ||
84 | void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr); | 86 | void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr); |
85 | int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr); | 87 | int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr); |
86 | void rdma_rw_cleanup_mrs(struct ib_qp *qp); | 88 | void rdma_rw_cleanup_mrs(struct ib_qp *qp); |