about summary refs log tree commit diff stats
path: root/net/sunrpc/xprtrdma
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2014-05-28 10:33:42 -0400
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2014-06-04 08:56:44 -0400
commit1c00dd0776543608e13c74a527660cb8cd28a74f (patch)
tree225dba01c882c314e3f7aa8bfd9febd830344f22 /net/sunrpc/xprtrdma
parent7f23f6f6e388d2003c4ecf5d558f3c2191e12530 (diff)
xprtrdma: Reduce calls to ib_poll_cq() in completion handlers
Change the completion handlers to grab up to 16 items per ib_poll_cq() call. No extra ib_poll_cq() call is needed if fewer than 16 items are returned.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r--net/sunrpc/xprtrdma/verbs.c56
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h4
2 files changed, 42 insertions, 18 deletions
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c7d5281063fd..b8caee91661c 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -162,14 +162,23 @@ rpcrdma_sendcq_process_wc(struct ib_wc *wc)
162} 162}
163 163
164static int 164static int
165rpcrdma_sendcq_poll(struct ib_cq *cq) 165rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
166{ 166{
167 struct ib_wc wc; 167 struct ib_wc *wcs;
168 int rc; 168 int count, rc;
169 169
170 while ((rc = ib_poll_cq(cq, 1, &wc)) == 1) 170 do {
171 rpcrdma_sendcq_process_wc(&wc); 171 wcs = ep->rep_send_wcs;
172 return rc; 172
173 rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
174 if (rc <= 0)
175 return rc;
176
177 count = rc;
178 while (count-- > 0)
179 rpcrdma_sendcq_process_wc(wcs++);
180 } while (rc == RPCRDMA_POLLSIZE);
181 return 0;
173} 182}
174 183
175/* 184/*
@@ -183,9 +192,10 @@ rpcrdma_sendcq_poll(struct ib_cq *cq)
183static void 192static void
184rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context) 193rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
185{ 194{
195 struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
186 int rc; 196 int rc;
187 197
188 rc = rpcrdma_sendcq_poll(cq); 198 rc = rpcrdma_sendcq_poll(cq, ep);
189 if (rc) { 199 if (rc) {
190 dprintk("RPC: %s: ib_poll_cq failed: %i\n", 200 dprintk("RPC: %s: ib_poll_cq failed: %i\n",
191 __func__, rc); 201 __func__, rc);
@@ -202,7 +212,7 @@ rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
202 return; 212 return;
203 } 213 }
204 214
205 rpcrdma_sendcq_poll(cq); 215 rpcrdma_sendcq_poll(cq, ep);
206} 216}
207 217
208static void 218static void
@@ -241,14 +251,23 @@ out_schedule:
241} 251}
242 252
243static int 253static int
244rpcrdma_recvcq_poll(struct ib_cq *cq) 254rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
245{ 255{
246 struct ib_wc wc; 256 struct ib_wc *wcs;
247 int rc; 257 int count, rc;
248 258
249 while ((rc = ib_poll_cq(cq, 1, &wc)) == 1) 259 do {
250 rpcrdma_recvcq_process_wc(&wc); 260 wcs = ep->rep_recv_wcs;
251 return rc; 261
262 rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
263 if (rc <= 0)
264 return rc;
265
266 count = rc;
267 while (count-- > 0)
268 rpcrdma_recvcq_process_wc(wcs++);
269 } while (rc == RPCRDMA_POLLSIZE);
270 return 0;
252} 271}
253 272
254/* 273/*
@@ -266,9 +285,10 @@ rpcrdma_recvcq_poll(struct ib_cq *cq)
266static void 285static void
267rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context) 286rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
268{ 287{
288 struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
269 int rc; 289 int rc;
270 290
271 rc = rpcrdma_recvcq_poll(cq); 291 rc = rpcrdma_recvcq_poll(cq, ep);
272 if (rc) { 292 if (rc) {
273 dprintk("RPC: %s: ib_poll_cq failed: %i\n", 293 dprintk("RPC: %s: ib_poll_cq failed: %i\n",
274 __func__, rc); 294 __func__, rc);
@@ -285,7 +305,7 @@ rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
285 return; 305 return;
286 } 306 }
287 307
288 rpcrdma_recvcq_poll(cq); 308 rpcrdma_recvcq_poll(cq, ep);
289} 309}
290 310
291#ifdef RPC_DEBUG 311#ifdef RPC_DEBUG
@@ -721,7 +741,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
721 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); 741 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
722 742
723 sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall, 743 sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
724 rpcrdma_cq_async_error_upcall, NULL, 744 rpcrdma_cq_async_error_upcall, ep,
725 ep->rep_attr.cap.max_send_wr + 1, 0); 745 ep->rep_attr.cap.max_send_wr + 1, 0);
726 if (IS_ERR(sendcq)) { 746 if (IS_ERR(sendcq)) {
727 rc = PTR_ERR(sendcq); 747 rc = PTR_ERR(sendcq);
@@ -738,7 +758,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
738 } 758 }
739 759
740 recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall, 760 recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
741 rpcrdma_cq_async_error_upcall, NULL, 761 rpcrdma_cq_async_error_upcall, ep,
742 ep->rep_attr.cap.max_recv_wr + 1, 0); 762 ep->rep_attr.cap.max_recv_wr + 1, 0);
743 if (IS_ERR(recvcq)) { 763 if (IS_ERR(recvcq)) {
744 rc = PTR_ERR(recvcq); 764 rc = PTR_ERR(recvcq);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 334ab6ee041a..cb4c882b97fe 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -74,6 +74,8 @@ struct rpcrdma_ia {
74 * RDMA Endpoint -- one per transport instance 74 * RDMA Endpoint -- one per transport instance
75 */ 75 */
76 76
77#define RPCRDMA_POLLSIZE (16)
78
77struct rpcrdma_ep { 79struct rpcrdma_ep {
78 atomic_t rep_cqcount; 80 atomic_t rep_cqcount;
79 int rep_cqinit; 81 int rep_cqinit;
@@ -88,6 +90,8 @@ struct rpcrdma_ep {
88 struct rdma_conn_param rep_remote_cma; 90 struct rdma_conn_param rep_remote_cma;
89 struct sockaddr_storage rep_remote_addr; 91 struct sockaddr_storage rep_remote_addr;
90 struct delayed_work rep_connect_worker; 92 struct delayed_work rep_connect_worker;
93 struct ib_wc rep_send_wcs[RPCRDMA_POLLSIZE];
94 struct ib_wc rep_recv_wcs[RPCRDMA_POLLSIZE];
91}; 95};
92 96
93#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) 97#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)