author	Chuck Lever <chuck.lever@oracle.com>	2014-07-29 17:25:46 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2014-07-31 16:22:58 -0400
commit	bb96193d9104613cd87fb518f25db3fadc36432e (patch)
tree	e2f8fdaf9d1307cf479585fa6dd2766f1f02b75c /net/sunrpc
parent	2e84522c2e0323a090fe1f7eeed6d5b6a68efe5f (diff)
xprtrdma: Schedule reply tasklet once per upcall
Minor optimization: grab rpcrdma_tk_lock_g and disable hard IRQs
just once after clearing the receive completion queue.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	31
1 file changed, 15 insertions(+), 16 deletions(-)
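The pattern this patch applies is worth spelling out: receive completions are gathered on a stack-local list while the CQ is polled, and rpcrdma_tk_lock_g is taken (with hard IRQs disabled) and the reply tasklet scheduled only once per upcall, rather than once per completion. The following is a minimal, self-contained sketch of that pattern, not code from the patch; the stand-in type struct item, the names global_sched_list/global_sched_lock, and the poll_one() callback are hypothetical simplifications, and the tasklet API is used as it existed at the time of this commit.

/*
 * Illustrative sketch only: stand-in types and poll_one() are hypothetical;
 * the local-list-then-splice locking pattern mirrors the patch.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct item {					/* stand-in for struct rpcrdma_rep */
	struct list_head list;
};

static LIST_HEAD(global_sched_list);		/* stand-in for rpcrdma_tasklets_g */
static DEFINE_SPINLOCK(global_sched_lock);	/* stand-in for rpcrdma_tk_lock_g */

static void reply_tasklet_func(unsigned long data)
{
	/* would drain global_sched_list under global_sched_lock */
}
static DECLARE_TASKLET(reply_tasklet, reply_tasklet_func, 0UL);

/*
 * Old behaviour: every completion took global_sched_lock (disabling hard
 * IRQs), queued one entry, and scheduled the tasklet.  New behaviour:
 * completions are collected on a stack-local list with no locking, then the
 * lock is taken and the tasklet scheduled exactly once per upcall.
 */
static void poll_and_schedule(struct item *(*poll_one)(void))
{
	struct list_head sched_list;
	unsigned long flags;
	struct item *it;

	INIT_LIST_HEAD(&sched_list);
	while ((it = poll_one()) != NULL)
		list_add_tail(&it->list, &sched_list);	/* lock-free: local list */

	spin_lock_irqsave(&global_sched_lock, flags);
	list_splice_tail(&sched_list, &global_sched_list);
	spin_unlock_irqrestore(&global_sched_lock, flags);
	tasklet_schedule(&reply_tasklet);
}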
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 31c4fd36d62c..f124f04e2e4e 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -105,17 +105,6 @@ rpcrdma_run_tasklet(unsigned long data)
 
 static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);
 
-static inline void
-rpcrdma_schedule_tasklet(struct rpcrdma_rep *rep)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
-	list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g);
-	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
-	tasklet_schedule(&rpcrdma_tasklet_g);
-}
-
 static void
 rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 {
@@ -214,7 +203,7 @@ rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
 }
 
 static void
-rpcrdma_recvcq_process_wc(struct ib_wc *wc)
+rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
 {
 	struct rpcrdma_rep *rep =
 		(struct rpcrdma_rep *)(unsigned long)wc->wr_id;
@@ -245,28 +234,38 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc)
 }
 
 out_schedule:
-	rpcrdma_schedule_tasklet(rep);
+	list_add_tail(&rep->rr_list, sched_list);
 }
 
 static int
 rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 {
+	struct list_head sched_list;
 	struct ib_wc *wcs;
 	int budget, count, rc;
+	unsigned long flags;
 
+	INIT_LIST_HEAD(&sched_list);
 	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
 	do {
 		wcs = ep->rep_recv_wcs;
 
 		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
 		if (rc <= 0)
-			return rc;
+			goto out_schedule;
 
 		count = rc;
 		while (count-- > 0)
-			rpcrdma_recvcq_process_wc(wcs++);
+			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
 	} while (rc == RPCRDMA_POLLSIZE && --budget);
-	return 0;
+	rc = 0;
+
+out_schedule:
+	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
+	list_splice_tail(&sched_list, &rpcrdma_tasklets_g);
+	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
+	tasklet_schedule(&rpcrdma_tasklet_g);
+	return rc;
 }
 
 /*