Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_transport.c')
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 43 ++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4ba11d0cefe1..f2e059bbab42 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -139,7 +139,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	struct svcxprt_rdma *xprt;
 	int i;
 
-	BUG_ON(!ctxt);
 	xprt = ctxt->xprt;
 	if (free_pages)
 		for (i = 0; i < ctxt->count; i++)
@@ -339,12 +338,14 @@ static void process_context(struct svcxprt_rdma *xprt,
 
 	switch (ctxt->wr_op) {
 	case IB_WR_SEND:
-		BUG_ON(ctxt->frmr);
+		if (ctxt->frmr)
+			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
 		svc_rdma_put_context(ctxt, 1);
 		break;
 
 	case IB_WR_RDMA_WRITE:
-		BUG_ON(ctxt->frmr);
+		if (ctxt->frmr)
+			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
 		svc_rdma_put_context(ctxt, 0);
 		break;
 
@@ -353,19 +354,21 @@ static void process_context(struct svcxprt_rdma *xprt,
 		svc_rdma_put_frmr(xprt, ctxt->frmr);
 		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
 			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
-			BUG_ON(!read_hdr);
-			spin_lock_bh(&xprt->sc_rq_dto_lock);
-			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-			list_add_tail(&read_hdr->dto_q,
-				      &xprt->sc_read_complete_q);
-			spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			if (read_hdr) {
+				spin_lock_bh(&xprt->sc_rq_dto_lock);
+				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+				list_add_tail(&read_hdr->dto_q,
+					      &xprt->sc_read_complete_q);
+				spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			} else {
+				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
+			}
 			svc_xprt_enqueue(&xprt->sc_xprt);
 		}
 		svc_rdma_put_context(ctxt, 0);
 		break;
 
 	default:
-		BUG_ON(1);
 		printk(KERN_ERR "svcrdma: unexpected completion type, "
 		       "opcode=%d\n",
 		       ctxt->wr_op);
@@ -513,7 +516,10 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	buflen = 0;
 	ctxt->direction = DMA_FROM_DEVICE;
 	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
-		BUG_ON(sge_no >= xprt->sc_max_sge);
+		if (sge_no >= xprt->sc_max_sge) {
+			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+			goto err_put_ctxt;
+		}
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
@@ -820,7 +826,7 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 	if (frmr) {
 		frmr_unmap_dma(rdma, frmr);
 		spin_lock_bh(&rdma->sc_frmr_q_lock);
-		BUG_ON(!list_empty(&frmr->frmr_list));
+		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
 		spin_unlock_bh(&rdma->sc_frmr_q_lock);
 	}
@@ -1123,7 +1129,9 @@ static void __svc_rdma_free(struct work_struct *work)
 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
 
 	/* We should only be called from kref_put */
-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
+		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
+		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));
 
 	/*
 	 * Destroy queued, but not processed read completions. Note
@@ -1151,8 +1159,12 @@ static void __svc_rdma_free(struct work_struct *work)
 	}
 
 	/* Warn if we leaked a resource or under-referenced */
-	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
-	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
+	if (atomic_read(&rdma->sc_ctxt_used) != 0)
+		pr_err("svcrdma: ctxt still in use? (%d)\n",
+		       atomic_read(&rdma->sc_ctxt_used));
+	if (atomic_read(&rdma->sc_dma_used) != 0)
+		pr_err("svcrdma: dma still in use? (%d)\n",
+		       atomic_read(&rdma->sc_dma_used));
 
 	/* De-allocate fastreg mr */
 	rdma_dealloc_frmr_q(rdma);
@@ -1252,7 +1264,6 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
 		return -ENOTCONN;
 
-	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
 	wr_count = 1;
 	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
 		wr_count++;
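
Every hunk above applies the same conversion: a fatal BUG_ON() assertion becomes a logged check (pr_err()) followed by a recoverable error path, so a malformed request degrades gracefully instead of crashing the host. Below is a minimal, self-contained userspace sketch of that pattern, modeled loosely on the svc_rdma_post_recv() hunk. It is illustrative only: pr_err() is emulated with fprintf(), and MAX_SGE, post_recv_sketch(), and err_put_ctxt are hypothetical stand-ins, not kernel code.

/*
 * Sketch: replace a fatal assertion with a logged, recoverable
 * error path.  Compile with: cc -o sketch sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_SGE 4	/* hypothetical per-transport sge limit */

/* Emulate the kernel's pr_err() for this userspace sketch. */
#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

static int post_recv_sketch(int needed_sges)
{
	int sge_no;

	for (sge_no = 0; sge_no < needed_sges; sge_no++) {
		/* Before the patch: BUG_ON(sge_no >= MAX_SGE)
		 * would have taken the whole machine down here. */
		if (sge_no >= MAX_SGE) {
			pr_err("sketch: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;	/* unwind, don't oops */
		}
		/* ... map the next page into sge_no here ... */
	}
	return 0;

err_put_ctxt:
	/* ... release any context/pages acquired so far ... */
	return -1;
}

int main(void)
{
	/* A request needing more sges than the limit now fails
	 * with an error report instead of a fatal assertion. */
	return post_recv_sketch(8) ? EXIT_FAILURE : EXIT_SUCCESS;
}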