author     santosh.shilimkar@oracle.com <santosh.shilimkar@oracle.com>  2016-03-01 18:20:45 -0500
committer  David S. Miller <davem@davemloft.net>                        2016-03-02 14:13:17 -0500
commit     dcfd041c8710320d59fce322fd901bddaf912ae8 (patch)
tree       e2ab557bfa9111e879b64715a21fb001172527a9 /net/rds/ib_cm.c
parent     72f26eee51e89c9d13b3aa199262fad57386f9e5 (diff)
RDS: IB: Remove the RDS_IB_SEND_OP dependency
This helps combine the asynchronous fastreg MR completion handler
with the send completion handler.
No functional change.
Signed-off-by: Santosh Shilimkar <ssantosh@kernel.org>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rds/ib_cm.c')
-rw-r--r--    net/rds/ib_cm.c    42
1 file changed, 27 insertions(+), 15 deletions(-)
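For context before the diff itself, a minimal sketch of the change (simplified, not the in-tree code; the _old/_new name suffixes are illustrative only): the old shared poller had to test an RDS_IB_SEND_OP bit in wr_id to decide whether a completion belonged to the send or the receive path. Splitting the poller per CQ means every completion from the send CQ goes straight to rds_ib_send_cqe_handler(), so the wr_id flag is no longer needed and fastreg MR completions can later be folded into the same send-side handler.

/* Illustrative sketch only, simplified names (poll_cq_old/poll_scq_new);
 * the real diff below is the in-tree version. */

/* Before: one poller served both CQs and dispatched on a wr_id flag. */
static void poll_cq_old(struct rds_ib_connection *ic, struct ib_cq *cq,
			struct ib_wc *wcs,
			struct rds_ib_ack_state *ack_state)
{
	int nr, i;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			if (wcs[i].wr_id & RDS_IB_SEND_OP)
				rds_ib_send_cqe_handler(ic, &wcs[i]);
			else
				rds_ib_recv_cqe_handler(ic, &wcs[i], ack_state);
		}
	}
}

/* After: the send CQ has a dedicated poller, so every completion it
 * returns is send-side work; no wr_id flag or ack state is needed. */
static void poll_scq_new(struct rds_ib_connection *ic, struct ib_cq *cq,
			 struct ib_wc *wcs)
{
	int nr, i;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0)
		for (i = 0; i < nr; i++)
			rds_ib_send_cqe_handler(ic, &wcs[i]);
}

A matching poll_rcq() in the diff keeps the ack_state argument, since acknowledgement tracking only matters on the receive path.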
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index da5a7fb98c77..7f68abc8a5bf 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -236,12 +236,10 @@ static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
 	tasklet_schedule(&ic->i_recv_tasklet);
 }
 
-static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
-		    struct ib_wc *wcs,
-		    struct rds_ib_ack_state *ack_state)
+static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
+		     struct ib_wc *wcs)
 {
-	int nr;
-	int i;
+	int nr, i;
 	struct ib_wc *wc;
 
 	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
@@ -251,10 +249,7 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 				 (unsigned long long)wc->wr_id, wc->status,
 				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));
 
-			if (wc->wr_id & RDS_IB_SEND_OP)
-				rds_ib_send_cqe_handler(ic, wc);
-			else
-				rds_ib_recv_cqe_handler(ic, wc, ack_state);
+			rds_ib_send_cqe_handler(ic, wc);
 		}
 	}
 }
@@ -263,14 +258,12 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
 {
 	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
 	struct rds_connection *conn = ic->conn;
-	struct rds_ib_ack_state state;
 
 	rds_ib_stats_inc(s_ib_tasklet_call);
 
-	memset(&state, 0, sizeof(state));
-	poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
 	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
-	poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
 
 	if (rds_conn_up(conn) &&
 	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
@@ -278,6 +271,25 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
 		rds_send_xmit(ic->conn);
 }
 
+static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
+		     struct ib_wc *wcs,
+		     struct rds_ib_ack_state *ack_state)
+{
+	int nr, i;
+	struct ib_wc *wc;
+
+	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
+		for (i = 0; i < nr; i++) {
+			wc = wcs + i;
+			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
+				 (unsigned long long)wc->wr_id, wc->status,
+				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));
+
+			rds_ib_recv_cqe_handler(ic, wc, ack_state);
+		}
+	}
+}
+
 static void rds_ib_tasklet_fn_recv(unsigned long data)
 {
 	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
@@ -291,9 +303,9 @@ static void rds_ib_tasklet_fn_recv(unsigned long data)
 	rds_ib_stats_inc(s_ib_tasklet_call);
 
 	memset(&state, 0, sizeof(state));
-	poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
 	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
-	poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
 
 	if (state.ack_next_valid)
 		rds_ib_set_ack(ic, state.ack_next, state.ack_required);