author    Linus Torvalds <torvalds@linux-foundation.org>    2016-05-24 17:39:20 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-24 17:39:20 -0400
commit    5d22c5ab85e4de3c14d9e79eefc2835f4a8f9b2a (patch)
tree      2aeaa4798bf856331609d90e5ccbb478014a66ce
parent    0e01df100b6bf22a1de61b66657502a6454153c5 (diff)
parent    c0cb8bf3a8e4bd82e640862cdd8891400405cb89 (diff)
Merge tag 'nfsd-4.7' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
 "A very quiet cycle for nfsd, mainly just an RDMA update from Chuck
  Lever"

* tag 'nfsd-4.7' of git://linux-nfs.org/~bfields/linux:
  sunrpc: fix stripping of padded MIC tokens
  svcrpc: autoload rdma module
  svcrdma: Generalize svc_rdma_xdr_decode_req()
  svcrdma: Eliminate code duplication in svc_rdma_recvfrom()
  svcrdma: Drain QP before freeing svcrdma_xprt
  svcrdma: Post Receives only for forward channel requests
  svcrdma: Remove superfluous line from rdma_read_chunks()
  svcrdma: svc_rdma_put_context() is invoked twice in Send error path
  svcrdma: Do not add XDR padding to xdr_buf page vector
  svcrdma: Support IPv6 with NFS/RDMA
  nfsd: handle seqid wraparound in nfsd4_preprocess_layout_stateid
  Remove unnecessary allocation
-rw-r--r--  fs/nfsd/nfs3xdr.c                          2
-rw-r--r--  fs/nfsd/nfs4layouts.c                      2
-rw-r--r--  fs/nfsd/nfs4state.c                        8
-rw-r--r--  fs/nfsd/state.h                            5
-rw-r--r--  include/linux/sunrpc/svc_rdma.h            2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c          9
-rw-r--r--  net/sunrpc/svc_xprt.c                     23
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_marshal.c    32
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c   34
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c     28
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  17
11 files changed, 90 insertions(+), 72 deletions(-)
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 93d5853f8c99..dba2ff8eaa68 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -379,7 +379,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 	 */
 	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
 	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-		- hdr;
+		+ rqstp->rq_arg.tail[0].iov_len - hdr;
 	/*
 	 * Round the length of the data which was specified up to
 	 * the next multiple of XDR units and then compare that
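
The change above widens the write-args sanity check to count data that lands in the tail iovec. A minimal stand-alone model of the corrected check (plain C, not kernel code; the kernel expresses the rounding with its XDR_QUADLEN() macro):

#include <stdio.h>

/* Stand-alone model of the corrected length check: the write count,
 * rounded up to whole XDR quads, must fit in the bytes remaining
 * after the header -- which now includes the tail iovec.
 */
static unsigned int xdr_quad_bytes(unsigned int len)
{
	return ((len + 3) >> 2) * 4;	/* XDR_QUADLEN(len) * 4 */
}

static int write_args_ok(unsigned int head_len, unsigned int page_len,
			 unsigned int tail_len, unsigned int hdr,
			 unsigned int count)
{
	unsigned int dlen = head_len + page_len + tail_len - hdr;

	return xdr_quad_bytes(count) <= dlen;
}

int main(void)
{
	/* Hypothetical request: 4 payload bytes sit in the tail iovec.
	 * Before the fix, dlen excluded the tail and this was rejected. */
	printf("with tail counted: %d\n", write_args_ok(200, 4096, 4, 120, 4180));
	printf("tail ignored:      %d\n", write_args_ok(200, 4096, 0, 120, 4180));
	return 0;
}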
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 825c7bc8d789..953c0755cb37 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -289,7 +289,7 @@ nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
 
 	status = nfserr_bad_stateid;
 	mutex_lock(&ls->ls_mutex);
-	if (stateid->si_generation > stid->sc_stateid.si_generation)
+	if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
 		goto out_unlock_stid;
 	if (layout_type != ls->ls_layout_type)
 		goto out_unlock_stid;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 0462eeddfff9..f5f82e145018 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4651,12 +4651,6 @@ grace_disallows_io(struct net *net, struct inode *inode)
 	return opens_in_grace(net) && mandatory_lock(inode);
 }
 
-/* Returns true iff a is later than b: */
-static bool stateid_generation_after(stateid_t *a, stateid_t *b)
-{
-	return (s32)(a->si_generation - b->si_generation) > 0;
-}
-
 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
 	/*
@@ -4670,7 +4664,7 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
 	return nfs_ok;
 
 	/* If the client sends us a stateid from the future, it's buggy: */
-	if (stateid_generation_after(in, ref))
+	if (nfsd4_stateid_generation_after(in, ref))
 		return nfserr_bad_stateid;
 	/*
 	 * However, we could see a stateid from the past, even from a
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index c050c53036a6..986e51e5ceac 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -573,6 +573,11 @@ enum nfsd4_cb_op {
 	NFSPROC4_CLNT_CB_SEQUENCE,
 };
 
+/* Returns true iff a is later than b: */
+static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
+{
+	return (s32)(a->si_generation - b->si_generation) > 0;
+}
 
 struct nfsd4_compound_state;
 struct nfsd_net;
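
Moving the helper into state.h lets nfs4layouts.c share it. The signed-difference cast is what makes the comparison safe across seqid wraparound; a stand-alone model in plain C:

#include <stdint.h>
#include <stdio.h>

/* Model of nfsd4_stateid_generation_after(): casting the unsigned
 * difference to signed 32-bit treats the generation space as circular,
 * so "later" stays correct even after si_generation wraps past zero.
 */
static int generation_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", generation_after(2, 1));			/* 1: plainly later */
	printf("%d\n", generation_after(0, 0xffffffffu));	/* 1: later, across the wrap */
	printf("%d\n", generation_after(0xffffffffu, 0));	/* 0: earlier, across the wrap */
	return 0;
}

A naive a > b comparison inverts the answer at the wrap point, which is the layout-stateid bug the comparison in nfs4layouts.c had.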
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 3081339968c3..d6917b896d3a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
 					       struct xdr_buf *rcvbuf);
 
 /* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
 extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
 				     struct rpcrdma_msg *,
 				     enum rpcrdma_errcode, __be32 *);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1095be9c80ab..e085f5ae1548 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -569,10 +569,9 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
 	struct rsc *found;
 
 	memset(&rsci, 0, sizeof(rsci));
-	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
-		return NULL;
+	rsci.handle.data = handle->data;
+	rsci.handle.len = handle->len;
 	found = rsc_lookup(cd, &rsci);
-	rsc_free(&rsci);
 	if (!found)
 		return NULL;
 	if (cache_check(cd, &found->h, NULL))
@@ -857,8 +856,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
 		goto out;
 	if (svc_getnl(&buf->head[0]) != seq)
 		goto out;
-	/* trim off the mic at the end before returning */
-	xdr_buf_trim(buf, mic.len + 4);
+	/* trim off the mic and padding at the end before returning */
+	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
 	stat = 0;
 out:
 	kfree(mic.data);
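
A GSS MIC token is carried as a 4-byte XDR length word plus the token bytes padded out to a quad (4-byte) boundary, so trimming only mic.len + 4 left the padding behind. A stand-alone illustration, assuming round_up_to_quad() rounds a length up to the next multiple of four as in the kernel's XDR helpers:

#include <stdio.h>

/* Illustration of the trim-length fix, assuming round_up_to_quad()
 * rounds a byte count up to the next multiple of four, matching the
 * kernel's XDR quad alignment.
 */
static unsigned int round_up_to_quad(unsigned int i)
{
	return (i + 3) & ~3u;
}

int main(void)
{
	unsigned int mic_len = 37;	/* hypothetical token length */

	/* Wire format: 4-byte length word + token padded to a quad. */
	printf("old trim: %u bytes\n", mic_len + 4);			/* 41: leaves 3 pad bytes */
	printf("new trim: %u bytes\n", round_up_to_quad(mic_len) + 4);	/* 44: token + pad + length */
	return 0;
}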
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 7422f28818b2..f5572e31d518 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -244,13 +244,12 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
 	svc_xprt_received(new);
 }
 
-int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 		    struct net *net, const int family,
 		    const unsigned short port, int flags)
 {
 	struct svc_xprt_class *xcl;
 
-	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
 	spin_lock(&svc_xprt_class_lock);
 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
 		struct svc_xprt *newxprt;
@@ -274,12 +273,28 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 	}
  err:
 	spin_unlock(&svc_xprt_class_lock);
-	dprintk("svc: transport %s not found\n", xprt_name);
-
 	/* This errno is exposed to user space. Provide a reasonable
 	 * perror msg for a bad transport. */
 	return -EPROTONOSUPPORT;
 }
+
+int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+		    struct net *net, const int family,
+		    const unsigned short port, int flags)
+{
+	int err;
+
+	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
+	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
+	if (err == -EPROTONOSUPPORT) {
+		request_module("svc%s", xprt_name);
+		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
+	}
+	if (err)
+		dprintk("svc: transport %s not found, err %d\n",
+			xprt_name, err);
+	return err;
+}
 EXPORT_SYMBOL_GPL(svc_create_xprt);
 
 /*
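
The autoload follows a plain fail/load/retry pattern: the first class lookup fails with -EPROTONOSUPPORT, request_module() is invoked once, and the lookup is retried exactly once. A user-space model of that flow (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the transport class registry: "tcp" is
 * built in; "rdma" appears only once its module has been "loaded". */
static int rdma_loaded;

static int find_class(const char *name)
{
	if (strcmp(name, "tcp") == 0)
		return 0;
	if (strcmp(name, "rdma") == 0 && rdma_loaded)
		return 0;
	return -EPROTONOSUPPORT;	/* the errno exposed to user space */
}

static int create_xprt(const char *name)
{
	int err = find_class(name);

	if (err == -EPROTONOSUPPORT) {
		rdma_loaded = 1;	/* stands in for request_module("svc%s", name) */
		err = find_class(name);	/* retried exactly once */
	}
	return err;
}

int main(void)
{
	printf("rdma -> %d\n", create_xprt("rdma"));	/* 0: found after autoload */
	return 0;
}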
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 765bca47c74d..0ba9887f3e22 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -145,19 +145,32 @@ static __be32 *decode_reply_array(__be32 *va, __be32 *vaend)
 	return (__be32 *)&ary->wc_array[nchunks];
 }
 
-int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
+/**
+ * svc_rdma_xdr_decode_req - Parse incoming RPC-over-RDMA header
+ * @rq_arg: Receive buffer
+ *
+ * On entry, xdr->head[0].iov_base points to first byte in the
+ * RPC-over-RDMA header.
+ *
+ * On successful exit, head[0] points to first byte past the
+ * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
+ * The length of the RPC-over-RDMA header is returned.
+ */
+int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
 {
+	struct rpcrdma_msg *rmsgp;
 	__be32 *va, *vaend;
 	unsigned int len;
 	u32 hdr_len;
 
 	/* Verify that there's enough bytes for header + something */
-	if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_ERR) {
+	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR) {
 		dprintk("svcrdma: header too short = %d\n",
-			rqstp->rq_arg.len);
+			rq_arg->len);
 		return -EINVAL;
 	}
 
+	rmsgp = (struct rpcrdma_msg *)rq_arg->head[0].iov_base;
 	if (rmsgp->rm_vers != rpcrdma_version) {
 		dprintk("%s: bad version %u\n", __func__,
 			be32_to_cpu(rmsgp->rm_vers));
@@ -189,10 +202,10 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
 			be32_to_cpu(rmsgp->rm_body.rm_padded.rm_thresh);
 
 		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
-		rqstp->rq_arg.head[0].iov_base = va;
+		rq_arg->head[0].iov_base = va;
 		len = (u32)((unsigned long)va - (unsigned long)rmsgp);
-		rqstp->rq_arg.head[0].iov_len -= len;
-		if (len > rqstp->rq_arg.len)
+		rq_arg->head[0].iov_len -= len;
+		if (len > rq_arg->len)
 			return -EINVAL;
 		return len;
 	default:
@@ -205,7 +218,7 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
 	 * chunk list and a reply chunk list.
 	 */
 	va = &rmsgp->rm_body.rm_chunks[0];
-	vaend = (__be32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
+	vaend = (__be32 *)((unsigned long)rmsgp + rq_arg->len);
 	va = decode_read_list(va, vaend);
 	if (!va) {
 		dprintk("svcrdma: failed to decode read list\n");
@@ -222,10 +235,9 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
 		return -EINVAL;
 	}
 
-	rqstp->rq_arg.head[0].iov_base = va;
+	rq_arg->head[0].iov_base = va;
 	hdr_len = (unsigned long)va - (unsigned long)rmsgp;
-	rqstp->rq_arg.head[0].iov_len -= hdr_len;
-
+	rq_arg->head[0].iov_len -= hdr_len;
 	return hdr_len;
 }
 
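
The kerneldoc added above states the function's contract purely in terms of the xdr_buf: head[0] is advanced past the RPC-over-RDMA header and the header length is returned. A toy user-space model of that contract (simplified types, illustration only):

#include <stddef.h>
#include <stdio.h>

struct kvec {
	void	*iov_base;
	size_t	 iov_len;
};

/* Toy model of the documented contract: on success, head[0].iov_base
 * is advanced just past the RPC-over-RDMA header and the header length
 * is returned; a short buffer yields a negative errno.
 */
static int decode_req(struct kvec *head, size_t hdr_len)
{
	if (hdr_len > head->iov_len)
		return -22;		/* -EINVAL */
	head->iov_base = (char *)head->iov_base + hdr_len;
	head->iov_len -= hdr_len;
	return (int)hdr_len;
}

int main(void)
{
	char buf[128] = { 0 };
	struct kvec head = { buf, sizeof(buf) };

	printf("hdr=%d, remaining=%zu\n", decode_req(&head, 28), head.iov_len);
	return 0;
}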
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index fbe7444e7de6..2c25606f2561 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -447,10 +447,8 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 	head->arg.len = rqstp->rq_arg.len;
 	head->arg.buflen = rqstp->rq_arg.buflen;
 
-	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-	position = be32_to_cpu(ch->rc_position);
-
 	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+	position = be32_to_cpu(ch->rc_position);
 	if (position == 0) {
 		head->arg.pages = &head->pages[0];
 		page_offset = head->byte_len;
@@ -488,7 +486,7 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 	if (page_offset & 3) {
 		u32 pad = 4 - (page_offset & 3);
 
-		head->arg.page_len += pad;
+		head->arg.tail[0].iov_len += pad;
 		head->arg.len += pad;
 		head->arg.buflen += pad;
 		page_offset += pad;
@@ -510,11 +508,10 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 	return ret;
 }
 
-static int rdma_read_complete(struct svc_rqst *rqstp,
-			      struct svc_rdma_op_ctxt *head)
+static void rdma_read_complete(struct svc_rqst *rqstp,
+			       struct svc_rdma_op_ctxt *head)
 {
 	int page_no;
-	int ret;
 
 	/* Copy RPC pages */
 	for (page_no = 0; page_no < head->count; page_no++) {
@@ -550,23 +547,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 	rqstp->rq_arg.tail[0] = head->arg.tail[0];
 	rqstp->rq_arg.len = head->arg.len;
 	rqstp->rq_arg.buflen = head->arg.buflen;
-
-	/* Free the context */
-	svc_rdma_put_context(head, 0);
-
-	/* XXX: What should this be? */
-	rqstp->rq_prot = IPPROTO_MAX;
-	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);
-
-	ret = rqstp->rq_arg.head[0].iov_len
-		+ rqstp->rq_arg.page_len
-		+ rqstp->rq_arg.tail[0].iov_len;
-	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
-		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
-		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
-		rqstp->rq_arg.head[0].iov_len);
-
-	return ret;
 }
 
 /* By convention, backchannel calls arrive via rdma_msg type
@@ -624,7 +604,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 				  dto_q);
 		list_del_init(&ctxt->dto_q);
 		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
-		return rdma_read_complete(rqstp, ctxt);
+		rdma_read_complete(rqstp, ctxt);
+		goto complete;
 	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
 		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
 				  struct svc_rdma_op_ctxt,
@@ -655,7 +636,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
 	/* Decode the RDMA header. */
 	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
-	ret = svc_rdma_xdr_decode_req(rmsgp, rqstp);
+	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
 	if (ret < 0)
 		goto out_err;
 	if (ret == 0)
@@ -682,6 +663,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		return 0;
 	}
 
+complete:
 	ret = rqstp->rq_arg.head[0].iov_len
 		+ rqstp->rq_arg.page_len
 		+ rqstp->rq_arg.tail[0].iov_len;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 4f1b1c4f45f9..54d533300620 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -463,25 +463,21 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		      struct svc_rqst *rqstp,
 		      struct page *page,
 		      struct rpcrdma_msg *rdma_resp,
-		      struct svc_rdma_op_ctxt *ctxt,
 		      struct svc_rdma_req_map *vec,
 		      int byte_count)
 {
+	struct svc_rdma_op_ctxt *ctxt;
 	struct ib_send_wr send_wr;
 	u32 xdr_off;
 	int sge_no;
 	int sge_bytes;
 	int page_no;
 	int pages;
-	int ret;
-
-	ret = svc_rdma_repost_recv(rdma, GFP_KERNEL);
-	if (ret) {
-		svc_rdma_put_context(ctxt, 0);
-		return -ENOTCONN;
-	}
+	int ret = -EIO;
 
 	/* Prepare the context */
+	ctxt = svc_rdma_get_context(rdma);
+	ctxt->direction = DMA_TO_DEVICE;
 	ctxt->pages[0] = page;
 	ctxt->count = 1;
 
@@ -565,8 +561,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
  err:
 	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 1);
-	pr_err("svcrdma: failed to send reply, rc=%d\n", ret);
-	return -EIO;
+	return ret;
 }
 
 void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
@@ -585,7 +580,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	int ret;
 	int inline_bytes;
 	struct page *res_page;
-	struct svc_rdma_op_ctxt *ctxt;
 	struct svc_rdma_req_map *vec;
 
 	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
@@ -598,8 +592,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
 
 	/* Build an req vec for the XDR */
-	ctxt = svc_rdma_get_context(rdma);
-	ctxt->direction = DMA_TO_DEVICE;
 	vec = svc_rdma_get_req_map(rdma);
 	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
 	if (ret)
@@ -635,7 +627,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		inline_bytes -= ret;
 	}
 
-	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
+	/* Post a fresh Receive buffer _before_ sending the reply */
+	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
+	if (ret)
+		goto err1;
+
+	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
 			 inline_bytes);
 	if (ret < 0)
 		goto err1;
@@ -648,7 +645,8 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	put_page(res_page);
  err0:
 	svc_rdma_put_req_map(rdma, vec);
-	svc_rdma_put_context(ctxt, 0);
+	pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
+	       ret);
 	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 	return -ENOTCONN;
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 90668969d559..dd9440137834 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -789,7 +789,7 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 	int ret;
 
 	dprintk("svcrdma: Creating RDMA socket\n");
-	if (sa->sa_family != AF_INET) {
+	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
 		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
 		return ERR_PTR(-EAFNOSUPPORT);
 	}
@@ -805,6 +805,16 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 		goto err0;
 	}
 
+	/* Allow both IPv4 and IPv6 sockets to bind a single port
+	 * at the same time.
+	 */
+#if IS_ENABLED(CONFIG_IPV6)
+	ret = rdma_set_afonly(listen_id, 1);
+	if (ret) {
+		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
+		goto err1;
+	}
+#endif
 	ret = rdma_bind_addr(listen_id, sa);
 	if (ret) {
 		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
@@ -1073,7 +1083,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
 
 	/* Post receive buffers */
-	for (i = 0; i < newxprt->sc_rq_depth; i++) {
+	for (i = 0; i < newxprt->sc_max_requests; i++) {
 		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
 		if (ret) {
 			dprintk("svcrdma: failure posting receive buffers\n");
@@ -1170,6 +1180,9 @@ static void __svc_rdma_free(struct work_struct *work)
 
 	dprintk("svcrdma: %s(%p)\n", __func__, rdma);
 
+	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
+		ib_drain_qp(rdma->sc_qp);
+
 	/* We should only be called from kref_put */
 	if (atomic_read(&xprt->xpt_ref.refcount) != 0)
 		pr_err("svcrdma: sc_xprt still in use? (%d)\n",