author    Tom Tucker <tom@opengridcomputing.com>  2008-10-03 16:22:18 -0400
committer Tom Tucker <tom@opengridcomputing.com>  2008-10-06 15:45:49 -0400
commit    e1183210625cc8e02ce13eec78fb7a246567fc59 (patch)
tree      c01cb7326fb64b899cc375bbadcea0e4579c6043 /net
parent    3a5c63803d0552a3ad93b85c262f12cd86471443 (diff)
svcrdma: Add a service to register a Fast Reg MR with the device
Fast Reg MR introduces a new WR type. Add a service to register the
region with the adapter and update the completion handling to support
completions with a NULL WR context.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
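As background on the key bump performed in svc_rdma_fastreg() below: the low
8 bits of an MR's lkey/rkey are the consumer-owned "key" portion, and bumping
them on every re-registration lets stale references to the previous mapping
fail with a protection error instead of silently hitting the new one. A
minimal userspace-style sketch of that arithmetic follows, mirroring what
ib_update_fast_reg_key() does; the names and key values are illustrative
only, not taken from this patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Replace the consumer-owned low byte of a key, keep the HW-owned rest. */
static uint32_t update_fast_reg_key(uint32_t key, uint8_t newkey)
{
	return (key & 0xFFFFFF00u) | newkey;
}

int main(void)
{
	uint32_t lkey = 0x00ABCD01u;          /* hypothetical starting lkey */
	uint8_t key = (uint8_t)(lkey & 0xFF); /* current low byte: 0x01 */

	/* Bump the key before reusing the MR, as svc_rdma_fastreg() does. */
	lkey = update_fast_reg_key(lkey, ++key);
	printf("bumped lkey: 0x%08" PRIX32 "\n", lkey); /* 0x00ABCD02 */
	return 0;
}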
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  111

1 file changed, 76 insertions(+), 35 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index a8ec4b1eec58..c3e8db0eeb3b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -326,6 +326,45 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
 }
 
 /*
+ * Process a completion context
+ */
+static void process_context(struct svcxprt_rdma *xprt,
+			    struct svc_rdma_op_ctxt *ctxt)
+{
+	svc_rdma_unmap_dma(ctxt);
+
+	switch (ctxt->wr_op) {
+	case IB_WR_SEND:
+		svc_rdma_put_context(ctxt, 1);
+		break;
+
+	case IB_WR_RDMA_WRITE:
+		svc_rdma_put_context(ctxt, 0);
+		break;
+
+	case IB_WR_RDMA_READ:
+		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
+			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
+			BUG_ON(!read_hdr);
+			spin_lock_bh(&xprt->sc_rq_dto_lock);
+			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+			list_add_tail(&read_hdr->dto_q,
+				      &xprt->sc_read_complete_q);
+			spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			svc_xprt_enqueue(&xprt->sc_xprt);
+		}
+		svc_rdma_put_context(ctxt, 0);
+		break;
+
+	default:
+		printk(KERN_ERR "svcrdma: unexpected completion type, "
+		       "opcode=%d\n",
+		       ctxt->wr_op);
+		break;
+	}
+}
+
+/*
  * Send Queue Completion Handler - potentially called on interrupt context.
  *
  * Note that caller must hold a transport reference.
@@ -337,17 +376,12 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
 	struct ib_cq *cq = xprt->sc_sq_cq;
 	int ret;
 
-
 	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
 		return;
 
 	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
 	atomic_inc(&rdma_stat_sq_poll);
 	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
-		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
-		xprt = ctxt->xprt;
-
-		svc_rdma_unmap_dma(ctxt);
 		if (wc.status != IB_WC_SUCCESS)
 			/* Close the transport */
 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
@@ -356,35 +390,10 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
 		atomic_dec(&xprt->sc_sq_count);
 		wake_up(&xprt->sc_send_wait);
 
-		switch (ctxt->wr_op) {
-		case IB_WR_SEND:
-			svc_rdma_put_context(ctxt, 1);
-			break;
-
-		case IB_WR_RDMA_WRITE:
-			svc_rdma_put_context(ctxt, 0);
-			break;
-
-		case IB_WR_RDMA_READ:
-			if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
-				struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
-				BUG_ON(!read_hdr);
-				spin_lock_bh(&xprt->sc_rq_dto_lock);
-				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-				list_add_tail(&read_hdr->dto_q,
-					      &xprt->sc_read_complete_q);
-				spin_unlock_bh(&xprt->sc_rq_dto_lock);
-				svc_xprt_enqueue(&xprt->sc_xprt);
-			}
-			svc_rdma_put_context(ctxt, 0);
-			break;
-
-		default:
-			printk(KERN_ERR "svcrdma: unexpected completion type, "
-			       "opcode=%d, status=%d\n",
-			       wc.opcode, wc.status);
-			break;
-		}
+		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
+		if (ctxt)
+			process_context(xprt, ctxt);
+
 		svc_xprt_put(&xprt->sc_xprt);
 	}
 
@@ -1184,6 +1193,40 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 	return 1;
 }
 
+/*
+ * Attempt to register the kvec representing the RPC memory with the
+ * device.
+ *
+ * Returns:
+ *  NULL : The device does not support fastreg or there were no more
+ *         fastreg mr.
+ *  frmr : The kvec register request was successfully posted.
+ *  <0   : An error was encountered attempting to register the kvec.
+ */
+int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
+		     struct svc_rdma_fastreg_mr *frmr)
+{
+	struct ib_send_wr fastreg_wr;
+	u8 key;
+
+	/* Bump the key */
+	key = (u8)(frmr->mr->lkey & 0x000000FF);
+	ib_update_fast_reg_key(frmr->mr, ++key);
+
+	/* Prepare FASTREG WR */
+	memset(&fastreg_wr, 0, sizeof fastreg_wr);
+	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
+	fastreg_wr.send_flags = IB_SEND_SIGNALED;
+	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
+	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
+	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
+	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+	fastreg_wr.wr.fast_reg.length = frmr->map_len;
+	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
+	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
+	return svc_rdma_send(xprt, &fastreg_wr);
+}
+
 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 {
 	struct ib_send_wr *bad_wr;
@@ -1193,8 +1236,6 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 		return -ENOTCONN;
 
 	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
-	BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
-	       wr->opcode);
 	/* If the SQ is full, wait until an SQ entry is available */
 	while (1) {
 		spin_lock_bh(&xprt->sc_lock);
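For context, a caller of the new service might look like the sketch below.
This is not code from the series: the wrapper function and the choice of
access flags are assumptions for illustration; only svc_rdma_fastreg() and
the frmr fields it reads (kva, page_list, page_list_len, map_len,
access_flags, mr) come from the patch above.

/* Hypothetical caller sketch, not part of this patch. */
static int svc_rdma_map_and_register(struct svcxprt_rdma *xprt,
				     struct svc_rdma_fastreg_mr *frmr,
				     void *kva, int npages)
{
	frmr->kva = kva;		/* start of the region to register */
	frmr->page_list_len = npages;	/* pages already in frmr->page_list */
	frmr->map_len = npages << PAGE_SHIFT;
	frmr->access_flags = IB_ACCESS_LOCAL_WRITE;

	/*
	 * svc_rdma_fastreg() posts an IB_WR_FAST_REG_MR WR whose wr_id is
	 * left zeroed by the memset(), so its completion reaches
	 * sq_cq_reap() with a NULL op context and is skipped by the new
	 * "if (ctxt)" test.
	 */
	return svc_rdma_fastreg(xprt, frmr);
}

Because the send queue executes WRs in order, later WRs posted on the same
queue can reference the region through frmr->mr->lkey once the fast
registration has been posted ahead of them.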