Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_transport.c')
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 47 +++++++++++++++++++++++++++++------------------
 1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4e618808bc98..f609c1c2d38d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -139,7 +139,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	struct svcxprt_rdma *xprt;
 	int i;
 
-	BUG_ON(!ctxt);
 	xprt = ctxt->xprt;
 	if (free_pages)
 		for (i = 0; i < ctxt->count; i++)
@@ -339,12 +338,14 @@ static void process_context(struct svcxprt_rdma *xprt,
 
 	switch (ctxt->wr_op) {
 	case IB_WR_SEND:
-		BUG_ON(ctxt->frmr);
+		if (ctxt->frmr)
+			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
 		svc_rdma_put_context(ctxt, 1);
 		break;
 
 	case IB_WR_RDMA_WRITE:
-		BUG_ON(ctxt->frmr);
+		if (ctxt->frmr)
+			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
 		svc_rdma_put_context(ctxt, 0);
 		break;
 
@@ -353,19 +354,21 @@ static void process_context(struct svcxprt_rdma *xprt,
 		svc_rdma_put_frmr(xprt, ctxt->frmr);
 		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
 			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
-			BUG_ON(!read_hdr);
-			spin_lock_bh(&xprt->sc_rq_dto_lock);
-			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-			list_add_tail(&read_hdr->dto_q,
-				      &xprt->sc_read_complete_q);
-			spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			if (read_hdr) {
+				spin_lock_bh(&xprt->sc_rq_dto_lock);
+				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+				list_add_tail(&read_hdr->dto_q,
+					      &xprt->sc_read_complete_q);
+				spin_unlock_bh(&xprt->sc_rq_dto_lock);
+			} else {
+				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
+			}
 			svc_xprt_enqueue(&xprt->sc_xprt);
 		}
 		svc_rdma_put_context(ctxt, 0);
 		break;
 
 	default:
-		BUG_ON(1);
 		printk(KERN_ERR "svcrdma: unexpected completion type, "
 		       "opcode=%d\n",
 		       ctxt->wr_op);
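
Every change in the two hunks above follows the same conversion: a precondition that used to halt the machine via BUG_ON() is demoted to a pr_err() report, and the completion path carries on with its normal cleanup. A minimal sketch of the pattern; the names below (handle_completion, struct my_ctxt) are invented for illustration and appear nowhere in this file:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Invented types and names; only the error-handling pattern
 * matches the patch above.
 */
struct my_ctxt {
	void *frmr;	/* expected to be NULL on this completion path */
};

static void handle_completion(struct my_ctxt *ctxt)
{
	/* Old style: BUG_ON(ctxt->frmr); -- halts the whole host. */
	if (ctxt->frmr)
		pr_err("example: ctxt->frmr != NULL\n");

	/* New style: report the broken invariant, then proceed with
	 * the normal cleanup so one bad completion cannot take down
	 * an otherwise healthy server.
	 */
}

The trade-off is deliberate: a single misbehaving completion now costs one log line instead of the whole host.
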
@@ -513,7 +516,10 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	buflen = 0;
 	ctxt->direction = DMA_FROM_DEVICE;
 	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
-		BUG_ON(sge_no >= xprt->sc_max_sge);
+		if (sge_no >= xprt->sc_max_sge) {
+			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+			goto err_put_ctxt;
+		}
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
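
Rather than crashing when the scatter-gather list would overflow, the receive path above now logs the count and jumps to err_put_ctxt, a cleanup label defined elsewhere in this function and not visible in the hunk. A hedged sketch of the goto-unwind idiom it leans on; post_buffers, nmax, and err_unwind are illustrative names, and the real label is assumed to release the context before failing the post:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int post_buffers(int needed, int nmax)
{
	int i;

	for (i = 0; i < needed; i++) {
		if (i >= nmax) {
			pr_err("example: too many sges (%d)\n", i);
			goto err_unwind;	/* clean up, don't BUG() */
		}
		/* ... allocate and map buffer i here ... */
	}
	return 0;

err_unwind:
	/* Release everything set up so far, then fail the call. */
	while (--i >= 0) {
		/* ... unmap and free buffer i ... */
	}
	return -EIO;
}
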
@@ -687,7 +693,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 {
 	struct rdma_cm_id *listen_id;
 	struct svcxprt_rdma *cma_xprt;
-	struct svc_xprt *xprt;
 	int ret;
 
 	dprintk("svcrdma: Creating RDMA socket\n");
@@ -698,7 +703,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 	cma_xprt = rdma_create_xprt(serv, 1);
 	if (!cma_xprt)
 		return ERR_PTR(-ENOMEM);
-	xprt = &cma_xprt->sc_xprt;
 
 	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
 				   IB_QPT_RC);
@@ -822,7 +826,7 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 	if (frmr) {
 		frmr_unmap_dma(rdma, frmr);
 		spin_lock_bh(&rdma->sc_frmr_q_lock);
-		BUG_ON(!list_empty(&frmr->frmr_list));
+		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
 		spin_unlock_bh(&rdma->sc_frmr_q_lock);
 	}
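
Here the assertion is kept but softened: WARN_ON_ONCE() prints a backtrace and taints the kernel on the first violation, then stays silent, which suits a queue operation that may run on every RPC. A small illustrative sketch; put_back_on_queue is an invented name:

#include <linux/kernel.h>
#include <linux/list.h>

static void put_back_on_queue(struct list_head *entry,
			      struct list_head *queue)
{
	/* Already being on a list indicates a bug, but not one
	 * worth crashing the machine over; the first hit logs a
	 * backtrace, later hits cannot flood the system log.
	 */
	WARN_ON_ONCE(!list_empty(entry));
	list_add(entry, queue);
}
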
@@ -970,10 +974,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	 * NB: iWARP requires remote write access for the data sink
 	 * of an RDMA_READ. IB does not.
 	 */
+	newxprt->sc_reader = rdma_read_chunk_lcl;
 	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
 		newxprt->sc_frmr_pg_list_len =
 			devattr.max_fast_reg_page_list_len;
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
+		newxprt->sc_reader = rdma_read_chunk_frmr;
 	}
 
 	/*
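
The accept path above now plants a reader method once, at connect time: the local-mapping variant is the safe default, upgraded to the fast-registration variant only when the device advertises IB_DEVICE_MEM_MGT_EXTENSIONS. A sketch of that capability-based dispatch, with invented names (struct xprt_example, read_chunk_plain, read_chunk_fastreg) standing in for the real types:

#include <linux/types.h>

struct xprt_example {
	int (*reader)(struct xprt_example *xprt);
};

static int read_chunk_plain(struct xprt_example *xprt)
{
	return 0;	/* stand-in for the local-mapping method */
}

static int read_chunk_fastreg(struct xprt_example *xprt)
{
	return 0;	/* stand-in for the FRMR-based method */
}

static void select_reader(struct xprt_example *xprt, bool has_fastreg)
{
	/* Pick the safe default first, upgrade if the device allows. */
	xprt->reader = read_chunk_plain;
	if (has_fastreg)
		xprt->reader = read_chunk_fastreg;
}

static int do_read(struct xprt_example *xprt)
{
	/* Callers dispatch through the pointer; no capability
	 * re-checks are needed on the hot path.
	 */
	return xprt->reader(xprt);
}
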
@@ -1125,7 +1131,9 @@ static void __svc_rdma_free(struct work_struct *work)
 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
 
 	/* We should only be called from kref_put */
-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
+		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
+		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));
 
 	/*
 	 * Destroy queued, but not processed read completions. Note
@@ -1153,8 +1161,12 @@ static void __svc_rdma_free(struct work_struct *work)
 	}
 
 	/* Warn if we leaked a resource or under-referenced */
-	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
-	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
+	if (atomic_read(&rdma->sc_ctxt_used) != 0)
+		pr_err("svcrdma: ctxt still in use? (%d)\n",
+		       atomic_read(&rdma->sc_ctxt_used));
+	if (atomic_read(&rdma->sc_dma_used) != 0)
+		pr_err("svcrdma: dma still in use? (%d)\n",
+		       atomic_read(&rdma->sc_dma_used));
 
 	/* De-allocate fastreg mr */
 	rdma_dealloc_frmr_q(rdma);
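
The two teardown hunks above replace bare assertions with messages that include the outstanding count, which makes a leak report actionable. A sketch of the accounting being audited, with illustrative names (example_used, example_teardown):

#include <linux/atomic.h>
#include <linux/printk.h>

/* Every allocation bumps the counter and every release drops it,
 * so teardown can report exactly how many resources leaked instead
 * of tripping a bare WARN_ON().
 */
static atomic_t example_used = ATOMIC_INIT(0);

static void example_get(void)
{
	atomic_inc(&example_used);
}

static void example_put(void)
{
	atomic_dec(&example_used);
}

static void example_teardown(void)
{
	if (atomic_read(&example_used) != 0)
		pr_err("example: %d resources still in use at free\n",
		       atomic_read(&example_used));
}
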
@@ -1254,7 +1266,6 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
 		return -ENOTCONN;
 
-	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
 	wr_count = 1;
 	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
 		wr_count++;