path: root/net/sunrpc/xprtrdma
author		Chuck Lever <chuck.lever@oracle.com>	2015-01-13 11:03:03 -0500
committer	J. Bruce Fields <bfields@redhat.com>	2015-01-15 15:01:45 -0500
commit		3fe04ee9f91084e7e6e999b09b8b15bcf97375e8 (patch)
tree		e6114e9643a87905b704c7103252e181043432ff /net/sunrpc/xprtrdma
parent		2397aa8b515f7bd77c8d5698170b6a98fdd6721c (diff)
svcrdma: Scrub BUG_ON() and WARN_ON() call sites
Current convention is to avoid using BUG_ON() in places where an oops could cause complete system failure.

Replace BUG_ON() call sites in svcrdma with an assertion error message and allow execution to continue safely.

Some BUG_ON() calls are removed because they have never fired in production (that we are aware of).

Some WARN_ON() calls are also replaced where a back trace is not helpful; e.g., in a workqueue task.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
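The transformation applied throughout the diff below can be illustrated with a small stand-alone C sketch (the pr_err() stub and the check_xdr_len() helper here are illustrative only, not part of this patch): instead of halting the machine with BUG_ON() when an internal invariant fails, the code logs the inconsistency and returns an error so only the affected request is failed.

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/* Userspace stand-in for the kernel's pr_err(), which logs at KERN_ERR. */
#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

/*
 * Hypothetical helper showing the pattern used in this patch.
 *
 * Before:  BUG_ON(len != expected);      -- oopses the whole host
 * After:   log the problem, return -EIO  -- only this request fails
 */
static int check_xdr_len(size_t len, size_t expected)
{
	if (len != expected) {
		pr_err("svcrdma: XDR buffer length error (%zu != %zu)\n",
		       len, expected);
		return -EIO;
	}
	return 0;
}

int main(void)
{
	/* A violated invariant now produces a log line and an error code. */
	return check_xdr_len(4, 8) ? 1 : 0;
}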
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_recvfrom.c		11
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_sendto.c		28
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	43
3 files changed, 49 insertions(+), 33 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index b3b7bb85844d..577f8659ca30 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -95,14 +95,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
-	/* We should never run out of SGE because the limit is defined to
-	 * support the max allowed RPC data length
-	 */
-	BUG_ON(bc && (sge_no == ctxt->count));
-	BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
-	       != byte_count);
-	BUG_ON(rqstp->rq_arg.len != byte_count);
-
 	/* If not all pages were used from the SGL, free the remaining ones */
 	bc = sge_no;
 	while (sge_no < ctxt->count) {
@@ -477,8 +469,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 	int page_no;
 	int ret;
 
-	BUG_ON(!head);
-
 	/* Copy RPC pages */
 	for (page_no = 0; page_no < head->count; page_no++) {
 		put_page(rqstp->rq_pages[page_no]);
@@ -567,7 +557,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	}
 	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
 		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
-	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
 	atomic_inc(&rdma_stat_recv);
 
 	/* Build up the XDR from the receive buffers. */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 9f1b50689c0f..7d79897959a4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -60,8 +60,11 @@ static int map_xdr(struct svcxprt_rdma *xprt,
 	u32 page_off;
 	int page_no;
 
-	BUG_ON(xdr->len !=
-	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));
+	if (xdr->len !=
+	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
+		pr_err("svcrdma: map_xdr: XDR buffer length error\n");
+		return -EIO;
+	}
 
 	/* Skip the first sge, this is for the RPCRDMA header */
 	sge_no = 1;
@@ -150,7 +153,11 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 	int bc;
 	struct svc_rdma_op_ctxt *ctxt;
 
-	BUG_ON(vec->count > RPCSVC_MAXPAGES);
+	if (vec->count > RPCSVC_MAXPAGES) {
+		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
+		return -EIO;
+	}
+
 	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
 		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
 		rmr, (unsigned long long)to, xdr_off,
@@ -190,7 +197,10 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		sge_off = 0;
 		sge_no++;
 		xdr_sge_no++;
-		BUG_ON(xdr_sge_no > vec->count);
+		if (xdr_sge_no > vec->count) {
+			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
+			goto err;
+		}
 		bc -= sge_bytes;
 		if (sge_no == xprt->sc_max_sge)
 			break;
@@ -421,7 +431,10 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
 		ctxt->sge[sge_no].length = sge_bytes;
 	}
-	BUG_ON(byte_count != 0);
+	if (byte_count != 0) {
+		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
+		goto err;
+	}
 
 	/* Save all respages in the ctxt and remove them from the
 	 * respages array. They are our pages until the I/O
@@ -442,7 +455,10 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	}
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
-	BUG_ON(sge_no > rdma->sc_max_sge);
+	if (sge_no > rdma->sc_max_sge) {
+		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+		goto err;
+	}
 	memset(&send_wr, 0, sizeof send_wr);
 	ctxt->wr_op = IB_WR_SEND;
 	send_wr.wr_id = (unsigned long)ctxt;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4ba11d0cefe1..f2e059bbab42 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -139,7 +139,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	struct svcxprt_rdma *xprt;
 	int i;
 
-	BUG_ON(!ctxt);
 	xprt = ctxt->xprt;
 	if (free_pages)
 		for (i = 0; i < ctxt->count; i++)
@@ -339,12 +338,14 @@ static void process_context(struct svcxprt_rdma *xprt,
 
 	switch (ctxt->wr_op) {
 	case IB_WR_SEND:
-		BUG_ON(ctxt->frmr);
+		if (ctxt->frmr)
+			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
 		svc_rdma_put_context(ctxt, 1);
 		break;
 
 	case IB_WR_RDMA_WRITE:
-		BUG_ON(ctxt->frmr);
+		if (ctxt->frmr)
+			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
 		svc_rdma_put_context(ctxt, 0);
 		break;
 
@@ -353,19 +354,21 @@ static void process_context(struct svcxprt_rdma *xprt,
 		svc_rdma_put_frmr(xprt, ctxt->frmr);
 		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
 			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
-			BUG_ON(!read_hdr);
-			spin_lock_bh(&xprt->sc_rq_dto_lock);
-			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-			list_add_tail(&read_hdr->dto_q,
-				      &xprt->sc_read_complete_q);
-			spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			if (read_hdr) {
+				spin_lock_bh(&xprt->sc_rq_dto_lock);
+				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+				list_add_tail(&read_hdr->dto_q,
+					      &xprt->sc_read_complete_q);
+				spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			} else {
+				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
+			}
 			svc_xprt_enqueue(&xprt->sc_xprt);
 		}
 		svc_rdma_put_context(ctxt, 0);
 		break;
 
 	default:
-		BUG_ON(1);
 		printk(KERN_ERR "svcrdma: unexpected completion type, "
 		       "opcode=%d\n",
 		       ctxt->wr_op);
@@ -513,7 +516,10 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	buflen = 0;
 	ctxt->direction = DMA_FROM_DEVICE;
 	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
-		BUG_ON(sge_no >= xprt->sc_max_sge);
+		if (sge_no >= xprt->sc_max_sge) {
+			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+			goto err_put_ctxt;
+		}
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
@@ -820,7 +826,7 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 	if (frmr) {
 		frmr_unmap_dma(rdma, frmr);
 		spin_lock_bh(&rdma->sc_frmr_q_lock);
-		BUG_ON(!list_empty(&frmr->frmr_list));
+		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
 		spin_unlock_bh(&rdma->sc_frmr_q_lock);
 	}
@@ -1123,7 +1129,9 @@ static void __svc_rdma_free(struct work_struct *work)
 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
 
 	/* We should only be called from kref_put */
-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
+		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
+		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));
 
 	/*
 	 * Destroy queued, but not processed read completions. Note
@@ -1151,8 +1159,12 @@ static void __svc_rdma_free(struct work_struct *work)
 	}
 
 	/* Warn if we leaked a resource or under-referenced */
-	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
-	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
+	if (atomic_read(&rdma->sc_ctxt_used) != 0)
+		pr_err("svcrdma: ctxt still in use? (%d)\n",
+		       atomic_read(&rdma->sc_ctxt_used));
+	if (atomic_read(&rdma->sc_dma_used) != 0)
+		pr_err("svcrdma: dma still in use? (%d)\n",
+		       atomic_read(&rdma->sc_dma_used));
 
 	/* De-allocate fastreg mr */
 	rdma_dealloc_frmr_q(rdma);
@@ -1252,7 +1264,6 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
 		return -ENOTCONN;
 
-	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
 	wr_count = 1;
 	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
 		wr_count++;