author    Chuck Lever <chuck.lever@oracle.com>    2017-02-07 11:58:56 -0500
committer J. Bruce Fields <bfields@redhat.com>    2017-02-08 14:42:00 -0500
commit    a3ab867fa64f9aedb3b01d570db5b43d2fc355fc
tree      24e6d9003d89386554b55a1fe540a2c779f4e92a
parent    aba7d14ba18c93a2ab37d50b057a885964ef285c
svcrdma: Combine list fields in struct svc_rdma_op_ctxt
Clean up: The free list and the dto_q list fields are never used at the
same time. Reduce the size of struct svc_rdma_op_ctxt by combining these
fields.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
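The technique generalizes: when an object is only ever on one list at a
time, a single struct list_head link can serve every list the object moves
through over its lifetime. A minimal userspace sketch of the size effect is
below; the struct and field names are illustrative only, not the kernel
structures, and the list_head here is a stand-in for the kernel's type.

    /* Sketch: one shared link field instead of one per list. */
    #include <stdio.h>

    struct list_head {
            struct list_head *next, *prev;
    };

    /* Before: a dedicated link for the free list and another for the
     * receive (dto) queue, even though the object is never on both. */
    struct ctxt_before {
            struct list_head free;
            struct list_head dto_q;
            /* ... other fields elided ... */
    };

    /* After: one link, reused for whichever list the object is on. */
    struct ctxt_after {
            struct list_head list;
            /* ... other fields elided ... */
    };

    int main(void)
    {
            printf("link fields before: %zu bytes, after: %zu bytes\n",
                   sizeof(struct ctxt_before), sizeof(struct ctxt_after));
            return 0;
    }

On a typical 64-bit build this prints 32 versus 16 bytes for the link
fields alone, i.e. dropping one list_head saves two pointers per context.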
-rw-r--r--  include/linux/sunrpc/svc_rdma.h            3
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c   14
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  33
3 files changed, 22 insertions(+), 28 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f77a7bc1612c..b105f73e3ca2 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod;
  * completes.
  */
 struct svc_rdma_op_ctxt {
-	struct list_head free;
+	struct list_head list;
 	struct svc_rdma_op_ctxt *read_hdr;
 	struct svc_rdma_fastreg_mr *frmr;
 	int hdr_count;
@@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt {
 	struct ib_cqe cqe;
 	struct ib_cqe reg_cqe;
 	struct ib_cqe inv_cqe;
-	struct list_head dto_q;
 	u32 byte_len;
 	u32 position;
 	struct svcxprt_rdma *xprt;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 172b537f8cfc..b9ccd73631a9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -608,18 +608,16 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
 	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
 	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
-		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
-				  struct svc_rdma_op_ctxt,
-				  dto_q);
-		list_del_init(&ctxt->dto_q);
+		ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
 		rdma_read_complete(rqstp, ctxt);
 		goto complete;
 	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
-		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
-				  struct svc_rdma_op_ctxt,
-				  dto_q);
-		list_del_init(&ctxt->dto_q);
+		ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q,
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 	} else {
 		atomic_inc(&rdma_stat_rq_starve);
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 023eaa01157f..87b8b5a10324 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -157,8 +157,7 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
 	ctxt = kmalloc(sizeof(*ctxt), flags);
 	if (ctxt) {
 		ctxt->xprt = xprt;
-		INIT_LIST_HEAD(&ctxt->free);
-		INIT_LIST_HEAD(&ctxt->dto_q);
+		INIT_LIST_HEAD(&ctxt->list);
 	}
 	return ctxt;
 }
@@ -180,7 +179,7 @@ static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
 			dprintk("svcrdma: No memory for RDMA ctxt\n");
 			return false;
 		}
-		list_add(&ctxt->free, &xprt->sc_ctxts);
+		list_add(&ctxt->list, &xprt->sc_ctxts);
 	}
 	return true;
 }
@@ -195,8 +194,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 		goto out_empty;
 
 	ctxt = list_first_entry(&xprt->sc_ctxts,
-				struct svc_rdma_op_ctxt, free);
-	list_del_init(&ctxt->free);
+				struct svc_rdma_op_ctxt, list);
+	list_del(&ctxt->list);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
 
 out:
@@ -256,7 +255,7 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 
 	spin_lock_bh(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used--;
-	list_add(&ctxt->free, &xprt->sc_ctxts);
+	list_add(&ctxt->list, &xprt->sc_ctxts);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
 }
 
@@ -266,8 +265,8 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
 		struct svc_rdma_op_ctxt *ctxt;
 
 		ctxt = list_first_entry(&xprt->sc_ctxts,
-					struct svc_rdma_op_ctxt, free);
-		list_del(&ctxt->free);
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		kfree(ctxt);
 	}
 }
@@ -404,7 +403,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	/* All wc fields are now known to be valid */
 	ctxt->byte_len = wc->byte_len;
 	spin_lock(&xprt->sc_rq_dto_lock);
-	list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
+	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
 	spin_unlock(&xprt->sc_rq_dto_lock);
 
 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
@@ -525,7 +524,7 @@ void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
 
 	read_hdr = ctxt->read_hdr;
 	spin_lock(&xprt->sc_rq_dto_lock);
-	list_add_tail(&read_hdr->dto_q,
+	list_add_tail(&read_hdr->list,
 		      &xprt->sc_read_complete_q);
 	spin_unlock(&xprt->sc_rq_dto_lock);
 
@@ -1213,20 +1212,18 @@ static void __svc_rdma_free(struct work_struct *work)
 	 */
 	while (!list_empty(&rdma->sc_read_complete_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
-		ctxt = list_entry(rdma->sc_read_complete_q.next,
-				  struct svc_rdma_op_ctxt,
-				  dto_q);
-		list_del_init(&ctxt->dto_q);
+		ctxt = list_first_entry(&rdma->sc_read_complete_q,
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		svc_rdma_put_context(ctxt, 1);
 	}
 
 	/* Destroy queued, but not processed recv completions */
 	while (!list_empty(&rdma->sc_rq_dto_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
-		ctxt = list_entry(rdma->sc_rq_dto_q.next,
-				  struct svc_rdma_op_ctxt,
-				  dto_q);
-		list_del_init(&ctxt->dto_q);
+		ctxt = list_first_entry(&rdma->sc_rq_dto_q,
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		svc_rdma_put_context(ctxt, 1);
 	}
 