-rw-r--r--   fs/nfsd/nfs3proc.c                         | 18
-rw-r--r--   fs/nfsd/nfs3xdr.c                          |  5
-rw-r--r--   fs/nfsd/nfs4callback.c                     |  4
-rw-r--r--   fs/nfsd/nfs4state.c                        |  8
-rw-r--r--   fs/nfsd/nfsctl.c                           |  2
-rw-r--r--   net/sunrpc/svc_xprt.c                      | 24
-rw-r--r--   net/sunrpc/svcsock.c                       | 20
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_recvfrom.c    | 12
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_rw.c          | 17
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_sendto.c      |  4
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_transport.c   |  9
11 files changed, 65 insertions, 58 deletions
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 9eb8086ea841..8f933e84cec1 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -463,8 +463,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
 		      &resp->common, nfs3svc_encode_entry);
 	memcpy(resp->verf, argp->verf, 8);
 	resp->count = resp->buffer - argp->buffer;
-	if (resp->offset)
-		xdr_encode_hyper(resp->offset, argp->cookie);
+	if (resp->offset) {
+		loff_t offset = argp->cookie;
+
+		if (unlikely(resp->offset1)) {
+			/* we ended up with offset on a page boundary */
+			*resp->offset = htonl(offset >> 32);
+			*resp->offset1 = htonl(offset & 0xffffffff);
+			resp->offset1 = NULL;
+		} else {
+			xdr_encode_hyper(resp->offset, offset);
+		}
+		resp->offset = NULL;
+	}
 
 	RETURN_STATUS(nfserr);
 }
@@ -533,6 +544,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
 		} else {
 			xdr_encode_hyper(resp->offset, offset);
 		}
+		resp->offset = NULL;
 	}
 
 	RETURN_STATUS(nfserr);
@@ -576,7 +588,7 @@ nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
 	resp->f_wtmax = max_blocksize;
 	resp->f_wtpref = max_blocksize;
 	resp->f_wtmult = PAGE_SIZE;
-	resp->f_dtpref = PAGE_SIZE;
+	resp->f_dtpref = max_blocksize;
 	resp->f_maxfilesize = ~(u32) 0;
 	resp->f_properties = NFS3_FSF_DEFAULT;
 
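The nfs3proc.c change above now writes the 64-bit readdir cookie through two separate 32-bit stores when the encode position straddles a page boundary, instead of one contiguous xdr_encode_hyper(). A rough user-space sketch of that split; every name below (encode_hyper_sketch, the page variables) is a hypothetical stand-in, not a SUNRPC API:

/* Illustrative only: a 64-bit cookie written either as one contiguous
 * 8-byte XDR quantity or as two 4-byte halves when the encode position
 * straddles a page boundary.
 */
#include <arpa/inet.h>	/* htonl(), ntohl() */
#include <stdint.h>
#include <stdio.h>

/* Contiguous case: conceptually what xdr_encode_hyper() does. */
static uint32_t *encode_hyper_sketch(uint32_t *p, uint64_t val)
{
	*p++ = htonl((uint32_t)(val >> 32));
	*p++ = htonl((uint32_t)(val & 0xffffffff));
	return p;
}

int main(void)
{
	uint64_t cookie = 0x0000000123456789ULL;
	uint32_t contiguous[2];
	uint32_t end_of_page_a, start_of_page_b;	/* split destinations */

	/* Normal case: both words land in the same buffer. */
	encode_hyper_sketch(contiguous, cookie);

	/* Page-boundary case: the high word goes to the last slot of one
	 * page, the low word to the first slot of the next page
	 * (resp->offset and resp->offset1 in the patch).
	 */
	end_of_page_a   = htonl((uint32_t)(cookie >> 32));
	start_of_page_b = htonl((uint32_t)(cookie & 0xffffffff));

	printf("contiguous: %08x %08x\n",
	       ntohl(contiguous[0]), ntohl(contiguous[1]));
	printf("split:      %08x %08x\n",
	       ntohl(end_of_page_a), ntohl(start_of_page_b));
	return 0;	/* both print the same two words */
}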
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 9b973f4f7d01..93fea246f676 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -573,6 +573,8 @@ int
 nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
 {
 	struct nfsd3_readdirargs *args = rqstp->rq_argp;
+	u32 max_blocksize = svc_max_payload(rqstp);
+
 	p = decode_fh(p, &args->fh);
 	if (!p)
 		return 0;
@@ -580,7 +582,7 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
 	args->verf = p; p += 2;
 	args->dircount = ~0;
 	args->count = ntohl(*p++);
-	args->count = min_t(u32, args->count, PAGE_SIZE);
+	args->count = min_t(u32, args->count, max_blocksize);
 	args->buffer = page_address(*(rqstp->rq_next_page++));
 
 	return xdr_argsize_check(rqstp, p);
@@ -921,6 +923,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
 		} else {
 			xdr_encode_hyper(cd->offset, offset64);
 		}
+		cd->offset = NULL;
 	}
 
 	/*
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index a9d24d5a967c..d219159b98af 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -900,9 +900,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
 		return PTR_ERR(client);
 	}
 	cred = get_backchannel_cred(clp, client, ses);
-	if (IS_ERR(cred)) {
+	if (!cred) {
 		rpc_shutdown_client(client);
-		return PTR_ERR(cred);
+		return -ENOMEM;
 	}
 	clp->cl_cb_client = client;
 	clp->cl_cb_cred = cred;
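The callback fix above works because get_backchannel_cred() reports failure with NULL rather than an ERR_PTR-encoded errno, so the old IS_ERR() test could never fire. A minimal user-space sketch of the two conventions; the macros are simplified re-creations and alloc_cred_sketch() is a hypothetical stand-in:

/* Illustrative only: why IS_ERR() never catches a NULL return. */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical allocator that signals failure with NULL, like
 * get_backchannel_cred() does. */
static void *alloc_cred_sketch(int fail)
{
	static int dummy;
	return fail ? NULL : &dummy;
}

int main(void)
{
	void *cred = alloc_cred_sketch(1);	/* simulate allocation failure */

	/* Wrong check: NULL is not in the ERR_PTR range, so this branch is
	 * skipped and the caller would later use a NULL credential. */
	if (IS_ERR(cred))
		printf("IS_ERR caught it (never happens for NULL)\n");

	/* Correct check for a NULL-on-failure API: */
	if (!cred)
		printf("NULL check caught it; return -ENOMEM\n");

	return 0;
}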
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fb3c9844c82a..6a45fb00c5fc 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1544,16 +1544,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
 {
 	u32 slotsize = slot_bytes(ca);
 	u32 num = ca->maxreqs;
-	int avail;
+	unsigned long avail, total_avail;
 
 	spin_lock(&nfsd_drc_lock);
-	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
-		    nfsd_drc_max_mem - nfsd_drc_mem_used);
+	total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
+	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
 	/*
 	 * Never use more than a third of the remaining memory,
 	 * unless it's the only way to give this client a slot:
 	 */
-	avail = clamp_t(int, avail, slotsize, avail/3);
+	avail = clamp_t(int, avail, slotsize, total_avail/3);
 	num = min_t(int, num, avail / slotsize);
 	nfsd_drc_mem_used += num * slotsize;
 	spin_unlock(&nfsd_drc_lock);
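The nfs4state.c hunk changes both the arithmetic type and the value used as the one-third ceiling. A user-space sketch of clamp_t() under stated assumptions (the min_t/max_t/clamp_t macros are simplified re-creations and the sizes are made-up examples), showing how the old expression caps avail at a third of itself while the new one caps it at a third of total_avail:

/* Illustrative only: clamp_t(type, val, lo, hi) behaviour for the DRC sizing. */
#include <limits.h>
#include <stdio.h>

#define min_t(type, a, b)	((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

int main(void)
{
	unsigned long total_avail = 3UL * 1024 * 1024 * 1024;	/* 3 GB free */
	unsigned long per_session_cap = 1024 * 1024;		/* stand-in limit */
	unsigned long slotsize = 2048;
	unsigned long avail = min_t(unsigned long, per_session_cap, total_avail);

	/* Old expression: int arithmetic, and avail/3 as the ceiling always
	 * cuts avail to a third of itself; on larger machines the values
	 * also push past INT_MAX and go negative when cast to int. */
	int old = clamp_t(int, avail, slotsize, avail / 3);

	/* New expression: unsigned long arithmetic, ceiling from total_avail. */
	unsigned long new = clamp_t(unsigned long, avail, slotsize,
				    total_avail / 3);

	printf("old (int, avail/3 cap):        %d\n", old);
	printf("new (unsigned long, total/3):  %lu\n", new);
	return 0;
}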
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 72a7681f4046..f2feb2d11bae 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1126,7 +1126,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
 	case 'Y':
 	case 'y':
 	case '1':
-		if (nn->nfsd_serv)
+		if (!nn->nfsd_serv)
 			return -EBUSY;
 		nfsd4_end_grace(nn);
 		break;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4eb8fbf2508d..61530b1b7754 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -357,15 +357,29 @@ static void svc_xprt_release_slot(struct svc_rqst *rqstp)
 	struct svc_xprt	*xprt = rqstp->rq_xprt;
 	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
 		atomic_dec(&xprt->xpt_nr_rqsts);
+		smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
 		svc_xprt_enqueue(xprt);
 	}
 }
 
-static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+static bool svc_xprt_ready(struct svc_xprt *xprt)
 {
-	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+	unsigned long xpt_flags;
+
+	/*
+	 * If another cpu has recently updated xpt_flags,
+	 * sk_sock->flags, xpt_reserved, or xpt_nr_rqsts, we need to
+	 * know about it; otherwise it's possible that both that cpu and
+	 * this one could call svc_xprt_enqueue() without either
+	 * svc_xprt_enqueue() recognizing that the conditions below
+	 * are satisfied, and we could stall indefinitely:
+	 */
+	smp_rmb();
+	xpt_flags = READ_ONCE(xprt->xpt_flags);
+
+	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
 		return true;
-	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
+	if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
 		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
 		    svc_xprt_slots_in_range(xprt))
 			return true;
@@ -381,7 +395,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	struct svc_rqst	*rqstp = NULL;
 	int cpu;
 
-	if (!svc_xprt_has_something_to_do(xprt))
+	if (!svc_xprt_ready(xprt))
 		return;
 
 	/* Mark transport as busy. It will remain in this state until
@@ -475,7 +489,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 	if (xprt && space < rqstp->rq_reserved) {
 		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
 		rqstp->rq_reserved = space;
-
+		smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
 		svc_xprt_enqueue(xprt);
 	}
 }
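The svc_xprt.c hunks pair an smp_wmb() on the side that updates xpt_nr_rqsts or xpt_reserved with an smp_rmb() in svc_xprt_ready() before it samples xpt_flags. A rough C11 user-space analogue of that pairing; the fences and names below only approximate the kernel primitives and this is not the SUNRPC code:

/* Illustrative only: publish a counter update before re-checking readiness. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nr_rqsts = 1;
static atomic_ulong xpt_flags;

#define XPT_DATA	1UL

/* Writer side: roughly what svc_xprt_release_slot() now does. */
static void release_slot(void)
{
	atomic_fetch_sub_explicit(&nr_rqsts, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	/* ...then svc_xprt_enqueue() would run and re-check readiness... */
}

/* Reader side: roughly what svc_xprt_ready() now does. */
static bool xprt_ready(void)
{
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	unsigned long flags =
		atomic_load_explicit(&xpt_flags, memory_order_relaxed);

	if (flags & XPT_DATA)
		return atomic_load_explicit(&nr_rqsts,
					    memory_order_relaxed) == 0;
	return false;
}

int main(void)
{
	atomic_fetch_or_explicit(&xpt_flags, XPT_DATA, memory_order_relaxed);
	release_slot();
	printf("ready: %d\n", xprt_ready());	/* 1: the slot release is seen */
	return 0;
}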
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a6a060925e5d..43590a968b73 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -349,12 +349,16 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
 /*
  * Set socket snd and rcv buffer lengths
  */
-static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
-				unsigned int rcv)
+static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
 {
+	unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
+	struct socket *sock = svsk->sk_sock;
+
+	nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
+
 	lock_sock(sock->sk);
-	sock->sk->sk_sndbuf = snd * 2;
-	sock->sk->sk_rcvbuf = rcv * 2;
+	sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
+	sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
 	sock->sk->sk_write_space(sock->sk);
 	release_sock(sock->sk);
 }
@@ -516,9 +520,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 	 * provides an upper bound on the number of threads
 	 * which will access the socket.
 	 */
-	svc_sock_setbufsize(svsk->sk_sock,
-			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
-			    (serv->sv_nrthreads+3) * serv->sv_max_mesg);
+	svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
 
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	skb = NULL;
@@ -681,9 +683,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 	 * receive and respond to one request.
 	 * svc_udp_recvfrom will re-adjust if necessary
 	 */
-	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
-			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+	svc_sock_setbufsize(svsk, 3);
 
 	/* data might have come in before data_ready set up */
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
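The new svc_sock_setbufsize() clamps the request count before multiplying, since sk_sndbuf and sk_rcvbuf are ints. A small sketch of why the INT_MAX / 2 / max_mesg bound keeps nreqs * max_mesg * 2 within int range; the 1 MB max_mesg value below is an assumed example:

/* Illustrative only: the overflow guard in the new buffer-size calculation. */
#include <limits.h>
#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))

static int bufsize(unsigned int nreqs, unsigned int max_mesg)
{
	nreqs = min(nreqs, INT_MAX / 2 / max_mesg);	/* overflow guard */
	return nreqs * max_mesg * 2;			/* now fits in int */
}

int main(void)
{
	unsigned int max_mesg = 1048576;	/* assumed 1 MB max RPC message */

	/* A huge thread count can no longer push the product past INT_MAX. */
	printf("capped bufsize: %d\n", bufsize(100000, max_mesg));
	printf("small bufsize:  %d\n", bufsize(4, max_mesg));
	return 0;
}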
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 828b149eaaef..65e2fb9aac65 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -272,11 +272,8 @@ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 			return false;
 		ctxt->rc_temp = true;
 		ret = __svc_rdma_post_recv(rdma, ctxt);
-		if (ret) {
-			pr_err("svcrdma: failure posting recv buffers: %d\n",
-			       ret);
+		if (ret)
 			return false;
-		}
 	}
 	return true;
 }
@@ -314,17 +311,14 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 
 	spin_lock(&rdma->sc_rq_dto_lock);
 	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
-	spin_unlock(&rdma->sc_rq_dto_lock);
+	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
 	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
+	spin_unlock(&rdma->sc_rq_dto_lock);
 	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
 		svc_xprt_enqueue(&rdma->sc_xprt);
 	goto out;
 
 flushed:
-	if (wc->status != IB_WC_WR_FLUSH_ERR)
-		pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
-		       ib_wc_status_msg(wc->status),
-		       wc->status, wc->vendor_err);
 post_err:
 	svc_rdma_recv_ctxt_put(rdma, ctxt);
 	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index dc1951759a8e..2121c9b4d275 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -64,8 +64,7 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 		spin_unlock(&rdma->sc_rw_ctxt_lock);
 	} else {
 		spin_unlock(&rdma->sc_rw_ctxt_lock);
-		ctxt = kmalloc(sizeof(*ctxt) +
-			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
+		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
 			       GFP_KERNEL);
 		if (!ctxt)
 			goto out;
@@ -213,13 +212,8 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 	wake_up(&rdma->sc_send_wait);
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (unlikely(wc->status != IB_WC_SUCCESS))
 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
-			       ib_wc_status_msg(wc->status),
-			       wc->status, wc->vendor_err);
-	}
 
 	svc_rdma_write_info_free(info);
 }
@@ -278,18 +272,15 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
-			       ib_wc_status_msg(wc->status),
-			       wc->status, wc->vendor_err);
 		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
 	} else {
 		spin_lock(&rdma->sc_rq_dto_lock);
 		list_add_tail(&info->ri_readctxt->rc_list,
 			      &rdma->sc_read_complete_q);
+		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
+		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
 		spin_unlock(&rdma->sc_rq_dto_lock);
 
-		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
 		svc_xprt_enqueue(&rdma->sc_xprt);
 	}
 
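The svc_rdma_rw.c hunk above replaces the open-coded sizeof arithmetic with struct_size(). A user-space approximation under stated assumptions: struct_size_sketch below omits the kernel helper's overflow saturation, and the struct layout is a stand-in for svc_rdma_rw_ctxt rather than the real definition:

/* Illustrative only: sizing an allocation that ends in a flexible array. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sg_entry {		/* stand-in for struct scatterlist */
	void *addr;
	unsigned int len;
};

struct rw_ctxt_sketch {		/* stand-in for svc_rdma_rw_ctxt */
	unsigned int nents;
	struct sg_entry first_sgl[];	/* flexible array member */
};

#define struct_size_sketch(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct rw_ctxt_sketch *ctxt;
	unsigned int n = 16;		/* stand-in for SG_CHUNK_SIZE */

	/* sizeof() does not evaluate ctxt, so using it before assignment is
	 * fine, mirroring the kernel idiom. */
	ctxt = malloc(struct_size_sketch(ctxt, first_sgl, n));
	if (!ctxt)
		return 1;
	memset(ctxt, 0, struct_size_sketch(ctxt, first_sgl, n));
	ctxt->nents = n;

	printf("allocated %zu bytes for %u trailing entries\n",
	       struct_size_sketch(ctxt, first_sgl, n), n);
	free(ctxt);
	return 0;
}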
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 1f200119268c..6fdba72f89f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -272,10 +272,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 		svc_xprt_enqueue(&rdma->sc_xprt);
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
-			       ib_wc_status_msg(wc->status),
-			       wc->status, wc->vendor_err);
 	}
 
 	svc_xprt_put(&rdma->sc_xprt);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 57f86c63a463..027a3b07d329 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -390,8 +390,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct ib_qp_init_attr qp_attr;
 	unsigned int ctxts, rq_depth;
 	struct ib_device *dev;
-	struct sockaddr *sap;
 	int ret = 0;
+	RPC_IFDEBUG(struct sockaddr *sap);
 
 	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
 	clear_bit(XPT_CONN, &xprt->xpt_flags);
@@ -525,6 +525,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	if (ret)
 		goto errout;
 
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
 	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
 	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
@@ -535,6 +536,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
 	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
 	dprintk("    ord             : %d\n", conn_param.initiator_depth);
+#endif
 
 	trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
 	return &newxprt->sc_xprt;
@@ -588,11 +590,6 @@ static void __svc_rdma_free(struct work_struct *work)
 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
 		ib_drain_qp(rdma->sc_qp);
 
-	/* We should only be called from kref_put */
-	if (kref_read(&xprt->xpt_ref) != 0)
-		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
-		       kref_read(&xprt->xpt_ref));
-
 	svc_rdma_flush_recv_queues(rdma);
 
 	/* Final put of backchannel client transport */
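The svc_rdma_transport.c changes keep the debug-only sap variable and the dprintk block out of builds without CONFIG_SUNRPC_DEBUG. A sketch of the underlying compile-out pattern; IFDEBUG_SKETCH, dprintk_sketch, and SUNRPC_DEBUG_SKETCH are hypothetical stand-ins for the sunrpc macros, not their actual definitions:

/* Illustrative only: a debug-only local plus debug-only printouts that
 * vanish entirely when the debug switch is off, without unused-variable
 * warnings for the local.
 */
#include <stdio.h>

#define SUNRPC_DEBUG_SKETCH 1			/* flip to 0 to compile it out */

#if SUNRPC_DEBUG_SKETCH
# define IFDEBUG_SKETCH(x)	x
# define dprintk_sketch(...)	printf(__VA_ARGS__)
#else
# define IFDEBUG_SKETCH(x)
# define dprintk_sketch(...)	do { } while (0)
#endif

static void accept_sketch(const char *peer)
{
	IFDEBUG_SKETCH(const char *sap = peer);	/* debug-only local */

#if SUNRPC_DEBUG_SKETCH
	dprintk_sketch("new connection accepted:\n");
	dprintk_sketch("    remote address: %s\n", sap);
#endif
}

int main(void)
{
	accept_sketch("192.0.2.1:2049");
	return 0;
}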