author	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-08 18:18:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-08 18:18:32 -0500
commit	f1517df8701c9f12dae9ce7f43a5d300a6917619 (patch)
tree	b7ba72e70fcdf17306d2a488b5b245e578ad52aa
parent	9d21874da8ec0e0043c85cde8dda173e74ffc24d (diff)
parent	175e03101d36c3034f3c80038d4c28838351a7f2 (diff)
Merge tag 'nfsd-4.16' of git://linux-nfs.org/~bfields/linux
Pull nfsd update from Bruce Fields:
 "A fairly small update this time around. Some cleanup, RDMA fixes,
  overlayfs fixes, and a fix for an NFSv4 state bug.

  The bigger deal for nfsd this time around was Jeff Layton's
  already-merged i_version patches"

* tag 'nfsd-4.16' of git://linux-nfs.org/~bfields/linux:
  svcrdma: Fix Read chunk round-up
  NFSD: hide unused svcxdr_dupstr()
  nfsd: store stat times in fill_pre_wcc() instead of inode times
  nfsd: encode stat->mtime for getattr instead of inode->i_mtime
  nfsd: return RESOURCE not GARBAGE_ARGS on too many ops
  nfsd4: don't set lock stateid's sc_type to CLOSED
  nfsd: Detect unhashed stids in nfsd4_verify_open_stid()
  sunrpc: remove dead code in svc_sock_setbufsize
  svcrdma: Post Receives in the Receive completion handler
  nfsd4: permit layoutget of executable-only files
  lockd: convert nlm_rqst.a_count from atomic_t to refcount_t
  lockd: convert nlm_lockowner.count from atomic_t to refcount_t
  lockd: convert nsm_handle.sm_count from atomic_t to refcount_t
-rw-r--r--	fs/locks.c	6
-rw-r--r--	fs/nfsd/nfs3xdr.c	31
-rw-r--r--	fs/nfsd/nfs4proc.c	9
-rw-r--r--	fs/nfsd/nfs4state.c	6
-rw-r--r--	fs/nfsd/nfs4xdr.c	16
-rw-r--r--	fs/nfsd/nfsfh.h	28
-rw-r--r--	fs/nfsd/nfsxdr.c	1
-rw-r--r--	include/linux/sunrpc/svc_rdma.h	2
-rw-r--r--	net/sunrpc/svcsock.c	14
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_backchannel.c	5
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_recvfrom.c	9
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_rw.c	12
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_sendto.c	6
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	25
14 files changed, 73 insertions(+), 97 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 21b4dfa289ee..d6ff4beb70ce 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1554,9 +1554,9 @@ out:
 EXPORT_SYMBOL(__break_lease);
 
 /**
- * lease_get_mtime - get the last modified time of an inode
+ * lease_get_mtime - update modified time of an inode with exclusive lease
  * @inode: the inode
- * @time: pointer to a timespec which will contain the last modified time
+ * @time: pointer to a timespec which contains the last modified time
  *
  * This is to force NFS clients to flush their caches for files with
  * exclusive leases. The justification is that if someone has an
@@ -1580,8 +1580,6 @@ void lease_get_mtime(struct inode *inode, struct timespec *time)
 
 	if (has_lease)
 		*time = current_time(inode);
-	else
-		*time = inode->i_mtime;
 }
 
 EXPORT_SYMBOL(lease_get_mtime);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 2758480555fa..1a70581e1cb2 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -251,6 +251,34 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 }
 
 /*
+ * Fill in the pre_op attr for the wcc data
+ */
+void fill_pre_wcc(struct svc_fh *fhp)
+{
+	struct inode *inode;
+	struct kstat stat;
+	__be32 err;
+
+	if (fhp->fh_pre_saved)
+		return;
+
+	inode = d_inode(fhp->fh_dentry);
+	err = fh_getattr(fhp, &stat);
+	if (err) {
+		/* Grab the times from inode anyway */
+		stat.mtime = inode->i_mtime;
+		stat.ctime = inode->i_ctime;
+		stat.size = inode->i_size;
+	}
+
+	fhp->fh_pre_mtime = stat.mtime;
+	fhp->fh_pre_ctime = stat.ctime;
+	fhp->fh_pre_size = stat.size;
+	fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
+	fhp->fh_pre_saved = true;
+}
+
+/*
  * Fill in the post_op attr for the wcc data
  */
 void fill_post_wcc(struct svc_fh *fhp)
@@ -261,7 +289,8 @@ void fill_post_wcc(struct svc_fh *fhp)
 		printk("nfsd: inode locked twice during operation.\n");
 
 	err = fh_getattr(fhp, &fhp->fh_post_attr);
-	fhp->fh_post_change = nfsd4_change_attribute(d_inode(fhp->fh_dentry));
+	fhp->fh_post_change = nfsd4_change_attribute(&fhp->fh_post_attr,
+						     d_inode(fhp->fh_dentry));
 	if (err) {
 		fhp->fh_post_saved = false;
 		/* Grab the ctime anyway - set_change_info might use it */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 008ea0b627d0..a0bed2b2004d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1363,14 +1363,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
 	const struct nfsd4_layout_ops *ops;
 	struct nfs4_layout_stateid *ls;
 	__be32 nfserr;
-	int accmode;
+	int accmode = NFSD_MAY_READ_IF_EXEC;
 
 	switch (lgp->lg_seg.iomode) {
 	case IOMODE_READ:
-		accmode = NFSD_MAY_READ;
+		accmode |= NFSD_MAY_READ;
 		break;
 	case IOMODE_RW:
-		accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
+		accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
 		break;
 	default:
 		dprintk("%s: invalid iomode %d\n",
@@ -1703,6 +1703,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 	status = nfserr_minor_vers_mismatch;
 	if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0)
 		goto out;
+	status = nfserr_resource;
+	if (args->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
+		goto out;
 
 	status = nfs41_check_op_ordering(args);
 	if (status) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b29b5a185a2c..150521c9671b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3590,6 +3590,7 @@ nfsd4_verify_open_stid(struct nfs4_stid *s)
 	switch (s->sc_type) {
 	default:
 		break;
+	case 0:
 	case NFS4_CLOSED_STID:
 	case NFS4_CLOSED_DELEG_STID:
 		ret = nfserr_bad_stateid;
@@ -5182,7 +5183,6 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
 			    lockowner(stp->st_stateowner)))
 		goto out;
 
-	stp->st_stid.sc_type = NFS4_CLOSED_STID;
 	release_lock_stateid(stp);
 	ret = nfs_ok;
 
@@ -6078,10 +6078,8 @@ out:
 	 * If this is a new, never-before-used stateid, and we are
 	 * returning an error, then just go ahead and release it.
 	 */
-	if (status && new) {
-		lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
+	if (status && new)
 		release_lock_stateid(lock_stp);
-	}
 
 	mutex_unlock(&lock_stp->st_mutex);
 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2c61c6b8ae09..e502fd16246b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -455,8 +455,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 	}
 
 	label->len = 0;
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-	if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+	if (IS_ENABLED(CONFIG_NFSD_V4_SECURITY_LABEL) &&
+	    bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
 		READ_BUF(4);
 		len += 4;
 		dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
@@ -476,7 +476,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 		if (!label->data)
 			return nfserr_jukebox;
 	}
-#endif
 	if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
 		if (!umask)
 			goto xdr_error;
@@ -1918,8 +1917,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 
 	if (argp->taglen > NFSD4_MAX_TAGLEN)
 		goto xdr_error;
-	if (argp->opcnt > 100)
-		goto xdr_error;
+	/*
+	 * NFS4ERR_RESOURCE is a more helpful error than GARBAGE_ARGS
+	 * here, so we return success at the xdr level so that
+	 * nfsd4_proc can handle this as an NFS-level error.
+	 */
+	if (argp->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
+		return 0;
 
 	if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
 		argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
@@ -1991,7 +1995,7 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
 		*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
 		*p++ = 0;
 	} else if (IS_I_VERSION(inode)) {
-		p = xdr_encode_hyper(p, nfsd4_change_attribute(inode));
+		p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
 	} else {
 		*p++ = cpu_to_be32(stat->ctime.tv_sec);
 		*p++ = cpu_to_be32(stat->ctime.tv_nsec);
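
The opcnt checks above work as a pair: nfsd4_decode_compound() deliberately reports XDR success for an oversized compound so that nfsd4_proc_compound() can reply with NFS4ERR_RESOURCE, which a client can recover from, instead of the RPC-level GARBAGE_ARGS. Below is a minimal stand-alone sketch of that split, assuming a user-space build; the constant, struct, and helper names are illustrative and not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define MAX_OPS_PER_COMPOUND	16	/* stand-in for NFSD_MAX_OPS_PER_COMPOUND */
#define ERR_RESOURCE		10018	/* NFS4ERR_RESOURCE */

struct compound_args {
	uint32_t opcnt;
};

/* Decoder: an oversized opcnt is not treated as an XDR error; defer it. */
static int decode_compound(struct compound_args *args, uint32_t opcnt)
{
	args->opcnt = opcnt;
	if (args->opcnt > MAX_OPS_PER_COMPOUND)
		return 0;		/* "success" at the XDR level */
	/* ... decode the individual operations here ... */
	return 0;
}

/* Processing layer: report the problem as an NFS-level status instead. */
static uint32_t proc_compound(const struct compound_args *args)
{
	if (args->opcnt > MAX_OPS_PER_COMPOUND)
		return ERR_RESOURCE;
	/* ... execute the operations ... */
	return 0;
}

int main(void)
{
	struct compound_args args;

	decode_compound(&args, 200);
	printf("status = %u\n", proc_compound(&args));	/* prints 10018 */
	return 0;
}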
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index b8444189223b..755e256a9103 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -253,36 +253,20 @@ fh_clear_wcc(struct svc_fh *fhp)
  * By using both ctime and the i_version counter we guarantee that as
  * long as time doesn't go backwards we never reuse an old value.
  */
-static inline u64 nfsd4_change_attribute(struct inode *inode)
+static inline u64 nfsd4_change_attribute(struct kstat *stat,
+					 struct inode *inode)
 {
 	u64 chattr;
 
-	chattr = inode->i_ctime.tv_sec;
+	chattr = stat->ctime.tv_sec;
 	chattr <<= 30;
-	chattr += inode->i_ctime.tv_nsec;
+	chattr += stat->ctime.tv_nsec;
 	chattr += inode_query_iversion(inode);
 	return chattr;
 }
 
-/*
- * Fill in the pre_op attr for the wcc data
- */
-static inline void
-fill_pre_wcc(struct svc_fh *fhp)
-{
-	struct inode *inode;
-
-	inode = d_inode(fhp->fh_dentry);
-	if (!fhp->fh_pre_saved) {
-		fhp->fh_pre_mtime = inode->i_mtime;
-		fhp->fh_pre_ctime = inode->i_ctime;
-		fhp->fh_pre_size = inode->i_size;
-		fhp->fh_pre_change = nfsd4_change_attribute(inode);
-		fhp->fh_pre_saved = true;
-	}
-}
-
-extern void fill_post_wcc(struct svc_fh *);
+extern void fill_pre_wcc(struct svc_fh *fhp);
+extern void fill_post_wcc(struct svc_fh *fhp);
 #else
 #define fh_clear_wcc(ignored)
 #define fill_pre_wcc(ignored)
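
For reference, the packing done by nfsd4_change_attribute() above can be tried in isolation. The sketch below is a user-space approximation only: inode_query_iversion() is replaced by a plain counter argument, and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* ctime seconds go in the high bits, nanoseconds are added in below
 * them (30 bits is enough for ~1e9 ns), and the i_version counter is
 * added on top, so either a ctime advance or an i_version bump yields
 * a new 64-bit change attribute.
 */
static uint64_t change_attribute(int64_t ctime_sec, long ctime_nsec,
				 uint64_t i_version)
{
	uint64_t chattr;

	chattr = (uint64_t)ctime_sec;
	chattr <<= 30;
	chattr += (uint64_t)ctime_nsec;
	chattr += i_version;
	return chattr;
}

int main(void)
{
	/* Same ctime, two successive i_version values: distinct attributes. */
	printf("%llu\n", (unsigned long long)change_attribute(1518131912, 500, 7));
	printf("%llu\n", (unsigned long long)change_attribute(1518131912, 500, 8));
	return 0;
}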
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 644a0342f0e0..79b6064f8977 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -188,6 +188,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 	*p++ = htonl((u32) stat->ino);
 	*p++ = htonl((u32) stat->atime.tv_sec);
 	*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
+	time = stat->mtime;
 	lease_get_mtime(d_inode(dentry), &time);
 	*p++ = htonl((u32) time.tv_sec);
 	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 995c6fe9ee90..4b731b046bcd 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
 extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
 extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
-extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
 extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
 extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5570719e4787..943f2a745cd5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
 static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 				unsigned int rcv)
 {
-#if 0
-	mm_segment_t oldfs;
-	oldfs = get_fs(); set_fs(KERNEL_DS);
-	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
-			(char*)&snd, sizeof(snd));
-	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
-			(char*)&rcv, sizeof(rcv));
-#else
-	/* sock_setsockopt limits use to sysctl_?mem_max,
-	 * which isn't acceptable. Until that is made conditional
-	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
-	 * DaveM said I could!
-	 */
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
 	sock->sk->sk_write_space(sock->sk);
 	release_sock(sock->sk);
-#endif
 }
 
 static int svc_sock_secure_port(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index af7893501e40..a73632ca9048 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -95,7 +95,6 @@ out_shortreply:
 out_notfound:
 	dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
 		xprt, be32_to_cpu(xid));
-
 	goto out_unlock;
 }
 
@@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	if (ret < 0)
 		goto out_err;
 
-	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
-	if (ret)
-		goto out_err;
-
 	/* Bump page refcnt so Send completion doesn't release
 	 * the rq_buffer before all retransmits are complete.
 	 */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index ad4bd62eebf1..19e9c6b33042 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
 	struct page *page;
 	int ret;
 
-	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
-	if (ret)
-		return;
-
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return;
@@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
 					       &rqstp->rq_arg);
 		svc_rdma_put_context(ctxt, 0);
-		if (ret)
-			goto repost;
 		return ret;
 	}
 
@@ -590,6 +584,5 @@ out_postfail:
 
 out_drop:
 	svc_rdma_put_context(ctxt, 1);
-repost:
-	return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
+	return 0;
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 9bd04549a1ad..12b9a7e0b6d2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
 		head->arg.head[0].iov_len - info->ri_position;
 	head->arg.head[0].iov_len = info->ri_position;
 
-	/* Read chunk may need XDR roundup (see RFC 5666, s. 3.7).
+	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
 	 *
-	 * NFSv2/3 write decoders need the length of the tail to
-	 * contain the size of the roundup padding.
+	 * If the client already rounded up the chunk length, the
+	 * length does not change. Otherwise, the length of the page
+	 * list is increased to include XDR round-up.
+	 *
+	 * Currently these chunks always start at page offset 0,
+	 * thus the rounded-up length never crosses a page boundary.
 	 */
-	head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3);
+	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;
 
 	head->arg.page_len = info->ri_chunklen;
 	head->arg.len += info->ri_chunklen;
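
XDR carries opaque data in 4-byte units, so a Read chunk whose payload length is not a multiple of four has to be rounded up before it is accounted in the page list. A small sketch of the arithmetic used above, assuming a user-space build; XDR_QUADLEN matches the kernel's definition in include/linux/sunrpc/xdr.h, the rest is illustrative.

#include <stdio.h>

/* Number of 4-byte XDR quads needed to hold l bytes. */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	unsigned int lengths[] = { 4, 5, 6, 7, 8, 1023 };
	unsigned int i;

	/* XDR_QUADLEN(len) << 2 rounds len up to the next 4-byte boundary
	 * and leaves an already-aligned length unchanged, which is the
	 * property the hunk above relies on for info->ri_chunklen.
	 */
	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		printf("%4u -> %4u\n", lengths[i], XDR_QUADLEN(lengths[i]) << 2);
	return 0;
}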
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 7c3a211e0e9a..649441d5087d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
 	}
 
-	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
-	if (ret)
-		goto err1;
 	ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp,
 				      wr_lst, rp_ch);
 	if (ret < 0)
@@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	if (ret != -E2BIG && ret != -EINVAL)
 		goto err1;
 
-	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
-	if (ret)
-		goto err1;
 	ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp);
 	if (ret < 0)
 		goto err0;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 46ec069150d5..9ad12a215b51 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -58,6 +58,7 @@
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
+static int svc_rdma_post_recv(struct svcxprt_rdma *xprt);
 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
 static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 					struct net *net,
@@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
 	spin_unlock(&xprt->sc_rq_dto_lock);
 
+	svc_rdma_post_recv(xprt);
+
 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 	if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
 		goto out;
@@ -404,7 +407,8 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	return cma_xprt;
 }
 
-int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
+static int
+svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 {
 	struct ib_recv_wr recv_wr, *bad_recv_wr;
 	struct svc_rdma_op_ctxt *ctxt;
@@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 			goto err_put_ctxt;
 		}
-		page = alloc_page(flags);
+		page = alloc_page(GFP_KERNEL);
 		if (!page)
 			goto err_put_ctxt;
 		ctxt->pages[sge_no] = page;
@@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 	return -ENOMEM;
 }
 
-int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
-{
-	int ret = 0;
-
-	ret = svc_rdma_post_recv(xprt, flags);
-	if (ret) {
-		pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
-		       ret);
-		pr_err("svcrdma: closing transport %p.\n", xprt);
-		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
-		ret = -ENOTCONN;
-	}
-	return ret;
-}
-
 static void
 svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
 			       struct rdma_conn_param *param)
@@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 
 	/* Post receive buffers */
 	for (i = 0; i < newxprt->sc_max_requests; i++) {
-		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
+		ret = svc_rdma_post_recv(newxprt);
 		if (ret) {
 			dprintk("svcrdma: failure posting receive buffers\n");
 			goto errout;