about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2018-05-07 15:27:21 -0400
committerJ. Bruce Fields <bfields@redhat.com>2018-05-11 15:48:57 -0400
commitecf85b2384ea5f7cb0577bf6143bc46d9ecfe4d3 (patch)
tree6f0ca0a83d39f3c6b82bd0df41896cfb315645f9 /include/linux
parentbd2abef33394dc16d63580c38c01420db991f0f2 (diff)
svcrdma: Introduce svc_rdma_recv_ctxt
svc_rdma_op_ctxt's are pre-allocated and maintained on a per-xprt free list. This eliminates the overhead of calling kmalloc / kfree, both of which grab a globally shared lock that disables interrupts. To reduce contention further, separate the use of these objects in the Receive and Send paths in svcrdma. Subsequent patches will take advantage of this separation by allocating real resources which are then cached in these objects. The allocations are freed when the transport is torn down. I've renamed the structure so that static type checking can be used to ensure that uses of op_ctxt and recv_ctxt are not confused. As an additional clean up, structure fields are renamed to conform with kernel coding conventions. As a final clean up, helpers related to recv_ctxt are moved closer to the functions that use them. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/sunrpc/svc_rdma.h24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 88da0c9bd7b1..37f759d65348 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -128,6 +128,9 @@ struct svcxprt_rdma {
 	unsigned long        sc_flags;
 	struct list_head     sc_read_complete_q;
 	struct work_struct   sc_work;
+
+	spinlock_t	     sc_recv_lock;
+	struct list_head     sc_recv_ctxts;
 };
 /* sc_flags */
 #define RDMAXPRT_CONN_PENDING	3
@@ -142,6 +145,19 @@ struct svcxprt_rdma {
 
 #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
 
+struct svc_rdma_recv_ctxt {
+	struct list_head	rc_list;
+	struct ib_recv_wr	rc_recv_wr;
+	struct ib_cqe		rc_cqe;
+	struct xdr_buf		rc_arg;
+	u32			rc_byte_len;
+	unsigned int		rc_page_count;
+	unsigned int		rc_hdr_count;
+	struct ib_sge		rc_sges[1 +
+				RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
+	struct page		*rc_pages[RPCSVC_MAXPAGES];
+};
+
 /* Track DMA maps for this transport and context */
 static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
 					   struct svc_rdma_op_ctxt *ctxt)
@@ -155,13 +171,19 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
 				    struct xdr_buf *rcvbuf);
 
 /* svc_rdma_recvfrom.c */
+extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
+extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
+extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
+				   struct svc_rdma_recv_ctxt *ctxt,
+				   int free_pages);
+extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
 extern int svc_rdma_recvfrom(struct svc_rqst *);
 
 /* svc_rdma_rw.c */
 extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
 extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
 				    struct svc_rqst *rqstp,
-				    struct svc_rdma_op_ctxt *head, __be32 *p);
+				    struct svc_rdma_recv_ctxt *head, __be32 *p);
 extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
 				     __be32 *wr_ch, struct xdr_buf *xdr);
 extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,