author    Linus Torvalds <torvalds@linux-foundation.org>  2017-09-09 16:31:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-09 16:31:49 -0400
commit    ad9a19d003703ae06a6e8efc64cf26a939d9e84d (patch)
tree      2383dde22df7b69e09c3190bce5ca062b756b1f3
parent    66ba772ee3119849fcdd8ac9766c6c25ede4a982 (diff)
parent    26fb2254dd33b02a522fac42745693f5969b7d4b (diff)
Merge tag 'nfsd-4.14' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
 "More RDMA work and some op-structure constification from Chuck Lever,
  and a small cleanup to our xdr encoding"

* tag 'nfsd-4.14' of git://linux-nfs.org/~bfields/linux:
  svcrdma: Estimate Send Queue depth properly
  rdma core: Add rdma_rw_mr_payload()
  svcrdma: Limit RQ depth
  svcrdma: Populate tail iovec when receiving
  nfsd: Incoming xdr_bufs may have content in tail buffer
  svcrdma: Clean up svc_rdma_build_read_chunk()
  sunrpc: Const-ify struct svc_serv_ops
  nfsd: Const-ify NFSv4 encoding and decoding ops arrays
  sunrpc: Const-ify instances of struct svc_xprt_ops
  nfsd4: individual encoders no longer see error cases
  nfsd4: skip encoder in trivial error cases
  nfsd4: define ->op_release for compound ops
  nfsd4: opdesc will be useful outside nfs4proc.c
  nfsd4: move some nfsd4 op definitions to xdr4.h
-rw-r--r--  drivers/infiniband/core/rw.c                 24
-rw-r--r--  fs/lockd/svc.c                                2
-rw-r--r--  fs/nfs/callback.c                            10
-rw-r--r--  fs/nfsd/nfs4proc.c                          110
-rw-r--r--  fs/nfsd/nfs4xdr.c                           346
-rw-r--r--  fs/nfsd/nfssvc.c                              2
-rw-r--r--  fs/nfsd/xdr4.h                               50
-rw-r--r--  include/linux/sunrpc/svc.h                    6
-rw-r--r--  include/linux/sunrpc/svc_xprt.h               4
-rw-r--r--  include/rdma/rw.h                             2
-rw-r--r--  net/sunrpc/svc.c                              6
-rw-r--r--  net/sunrpc/svcsock.c                          6
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_rw.c           116
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c     40
14 files changed, 323 insertions, 401 deletions
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index dbfd854c32c9..6ca607e8e293 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -643,6 +643,30 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
643} 643}
644EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); 644EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
645 645
646/**
647 * rdma_rw_mr_factor - return number of MRs required for a payload
648 * @device: device handling the connection
649 * @port_num: port num to which the connection is bound
650 * @maxpages: maximum payload pages per rdma_rw_ctx
651 *
652 * Returns the number of MRs the device requires to move @maxpayload
653 * bytes. The returned value is used during transport creation to
654 * compute max_rdma_ctxts and the size of the transport's Send and
655 * Send Completion Queues.
656 */
657unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
658 unsigned int maxpages)
659{
660 unsigned int mr_pages;
661
662 if (rdma_rw_can_use_mr(device, port_num))
663 mr_pages = rdma_rw_fr_page_list_len(device);
664 else
665 mr_pages = device->attrs.max_sge_rd;
666 return DIV_ROUND_UP(maxpages, mr_pages);
667}
668EXPORT_SYMBOL(rdma_rw_mr_factor);
669
646void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr) 670void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
647{ 671{
648 u32 factor; 672 u32 factor;
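
As a rough illustration of how this new helper is meant to be used when sizing a Send Queue (per the "svcrdma: Estimate Send Queue depth properly" commit in this pull), the sketch below is a hypothetical helper, not part of this patch; the credit-based formula is a simplification of what the svcrdma transport actually does.

#include <rdma/rw.h>
#include <linux/sunrpc/svc.h>	/* RPCSVC_MAXPAGES */

/*
 * Minimal sketch (hypothetical helper, not from this series): size a
 * Send Queue from the per-payload MR factor.  A transport that grants
 * "credits" concurrent RPCs needs one Send WR per reply plus enough
 * R/W contexts to move a worst-case RPCSVC_MAXPAGES payload each.
 */
static unsigned int estimate_sq_depth(struct ib_device *dev, u8 port_num,
				      unsigned int credits)
{
	unsigned int ctxts;

	/* MRs needed to move one maximum-sized RDMA Read/Write payload */
	ctxts = rdma_rw_mr_factor(dev, port_num, RPCSVC_MAXPAGES);

	/* one Send WR per credit plus the R/W contexts behind each credit */
	return credits * (1 + ctxts);
}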
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 726b6cecf430..b995bdc13976 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -396,7 +396,7 @@ out_rqst:
396 return error; 396 return error;
397} 397}
398 398
399static struct svc_serv_ops lockd_sv_ops = { 399static const struct svc_serv_ops lockd_sv_ops = {
400 .svo_shutdown = svc_rpcb_cleanup, 400 .svo_shutdown = svc_rpcb_cleanup,
401 .svo_enqueue_xprt = svc_xprt_do_enqueue, 401 .svo_enqueue_xprt = svc_xprt_do_enqueue,
402}; 402};
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 34323877ec13..2cddf7f437e6 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -226,26 +226,26 @@ err_bind:
226 return ret; 226 return ret;
227} 227}
228 228
229static struct svc_serv_ops nfs40_cb_sv_ops = { 229static const struct svc_serv_ops nfs40_cb_sv_ops = {
230 .svo_function = nfs4_callback_svc, 230 .svo_function = nfs4_callback_svc,
231 .svo_enqueue_xprt = svc_xprt_do_enqueue, 231 .svo_enqueue_xprt = svc_xprt_do_enqueue,
232 .svo_setup = svc_set_num_threads_sync, 232 .svo_setup = svc_set_num_threads_sync,
233 .svo_module = THIS_MODULE, 233 .svo_module = THIS_MODULE,
234}; 234};
235#if defined(CONFIG_NFS_V4_1) 235#if defined(CONFIG_NFS_V4_1)
236static struct svc_serv_ops nfs41_cb_sv_ops = { 236static const struct svc_serv_ops nfs41_cb_sv_ops = {
237 .svo_function = nfs41_callback_svc, 237 .svo_function = nfs41_callback_svc,
238 .svo_enqueue_xprt = svc_xprt_do_enqueue, 238 .svo_enqueue_xprt = svc_xprt_do_enqueue,
239 .svo_setup = svc_set_num_threads_sync, 239 .svo_setup = svc_set_num_threads_sync,
240 .svo_module = THIS_MODULE, 240 .svo_module = THIS_MODULE,
241}; 241};
242 242
243static struct svc_serv_ops *nfs4_cb_sv_ops[] = { 243static const struct svc_serv_ops *nfs4_cb_sv_ops[] = {
244 [0] = &nfs40_cb_sv_ops, 244 [0] = &nfs40_cb_sv_ops,
245 [1] = &nfs41_cb_sv_ops, 245 [1] = &nfs41_cb_sv_ops,
246}; 246};
247#else 247#else
248static struct svc_serv_ops *nfs4_cb_sv_ops[] = { 248static const struct svc_serv_ops *nfs4_cb_sv_ops[] = {
249 [0] = &nfs40_cb_sv_ops, 249 [0] = &nfs40_cb_sv_ops,
250 [1] = NULL, 250 [1] = NULL,
251}; 251};
@@ -254,8 +254,8 @@ static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
254static struct svc_serv *nfs_callback_create_svc(int minorversion) 254static struct svc_serv *nfs_callback_create_svc(int minorversion)
255{ 255{
256 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; 256 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
257 const struct svc_serv_ops *sv_ops;
257 struct svc_serv *serv; 258 struct svc_serv *serv;
258 struct svc_serv_ops *sv_ops;
259 259
260 /* 260 /*
261 * Check whether we're already up and running. 261 * Check whether we're already up and running.
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index d27e75ad25e3..3c69db7d4905 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -784,6 +784,14 @@ out:
784 return status; 784 return status;
785} 785}
786 786
787
788static void
789nfsd4_read_release(union nfsd4_op_u *u)
790{
791 if (u->read.rd_filp)
792 fput(u->read.rd_filp);
793}
794
787static __be32 795static __be32
788nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 796nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
789 union nfsd4_op_u *u) 797 union nfsd4_op_u *u)
@@ -912,6 +920,13 @@ nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstat
912 return nfs_ok; 920 return nfs_ok;
913} 921}
914 922
923static void
924nfsd4_secinfo_release(union nfsd4_op_u *u)
925{
926 if (u->secinfo.si_exp)
927 exp_put(u->secinfo.si_exp);
928}
929
915static __be32 930static __be32
916nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 931nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
917 union nfsd4_op_u *u) 932 union nfsd4_op_u *u)
@@ -1335,6 +1350,12 @@ out:
1335 return nfserr; 1350 return nfserr;
1336} 1351}
1337 1352
1353static void
1354nfsd4_getdeviceinfo_release(union nfsd4_op_u *u)
1355{
1356 kfree(u->getdeviceinfo.gd_device);
1357}
1358
1338static __be32 1359static __be32
1339nfsd4_layoutget(struct svc_rqst *rqstp, 1360nfsd4_layoutget(struct svc_rqst *rqstp,
1340 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 1361 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
@@ -1415,6 +1436,12 @@ out:
1415 return nfserr; 1436 return nfserr;
1416} 1437}
1417 1438
1439static void
1440nfsd4_layoutget_release(union nfsd4_op_u *u)
1441{
1442 kfree(u->layoutget.lg_content);
1443}
1444
1418static __be32 1445static __be32
1419nfsd4_layoutcommit(struct svc_rqst *rqstp, 1446nfsd4_layoutcommit(struct svc_rqst *rqstp,
1420 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 1447 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
@@ -1541,49 +1568,6 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
1541 nfsdstats.nfs4_opcount[opnum]++; 1568 nfsdstats.nfs4_opcount[opnum]++;
1542} 1569}
1543 1570
1544enum nfsd4_op_flags {
1545 ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
1546 ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
1547 ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */
1548 /* For rfc 5661 section 2.6.3.1.1: */
1549 OP_HANDLES_WRONGSEC = 1 << 3,
1550 OP_IS_PUTFH_LIKE = 1 << 4,
1551 /*
1552 * These are the ops whose result size we estimate before
1553 * encoding, to avoid performing an op then not being able to
1554 * respond or cache a response. This includes writes and setattrs
1555 * as well as the operations usually called "nonidempotent":
1556 */
1557 OP_MODIFIES_SOMETHING = 1 << 5,
1558 /*
1559 * Cache compounds containing these ops in the xid-based drc:
1560 * We use the DRC for compounds containing non-idempotent
1561 * operations, *except* those that are 4.1-specific (since
1562 * sessions provide their own EOS), and except for stateful
1563 * operations other than setclientid and setclientid_confirm
1564 * (since sequence numbers provide EOS for open, lock, etc in
1565 * the v4.0 case).
1566 */
1567 OP_CACHEME = 1 << 6,
1568 /*
1569 * These are ops which clear current state id.
1570 */
1571 OP_CLEAR_STATEID = 1 << 7,
1572};
1573
1574struct nfsd4_operation {
1575 __be32 (*op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
1576 union nfsd4_op_u *);
1577 u32 op_flags;
1578 char *op_name;
1579 /* Try to get response size before operation */
1580 u32 (*op_rsize_bop)(struct svc_rqst *, struct nfsd4_op *);
1581 void (*op_get_currentstateid)(struct nfsd4_compound_state *,
1582 union nfsd4_op_u *);
1583 void (*op_set_currentstateid)(struct nfsd4_compound_state *,
1584 union nfsd4_op_u *);
1585};
1586
1587static const struct nfsd4_operation nfsd4_ops[]; 1571static const struct nfsd4_operation nfsd4_ops[];
1588 1572
1589static const char *nfsd4_op_name(unsigned opnum); 1573static const char *nfsd4_op_name(unsigned opnum);
@@ -1621,7 +1605,7 @@ static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
1621 return nfs_ok; 1605 return nfs_ok;
1622} 1606}
1623 1607
1624static inline const struct nfsd4_operation *OPDESC(struct nfsd4_op *op) 1608const struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
1625{ 1609{
1626 return &nfsd4_ops[op->opnum]; 1610 return &nfsd4_ops[op->opnum];
1627} 1611}
@@ -1694,7 +1678,6 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
1694 struct nfsd4_compoundargs *args = rqstp->rq_argp; 1678 struct nfsd4_compoundargs *args = rqstp->rq_argp;
1695 struct nfsd4_compoundres *resp = rqstp->rq_resp; 1679 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1696 struct nfsd4_op *op; 1680 struct nfsd4_op *op;
1697 const struct nfsd4_operation *opdesc;
1698 struct nfsd4_compound_state *cstate = &resp->cstate; 1681 struct nfsd4_compound_state *cstate = &resp->cstate;
1699 struct svc_fh *current_fh = &cstate->current_fh; 1682 struct svc_fh *current_fh = &cstate->current_fh;
1700 struct svc_fh *save_fh = &cstate->save_fh; 1683 struct svc_fh *save_fh = &cstate->save_fh;
@@ -1747,15 +1730,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
1747 goto encode_op; 1730 goto encode_op;
1748 } 1731 }
1749 1732
1750 opdesc = OPDESC(op);
1751
1752 if (!current_fh->fh_dentry) { 1733 if (!current_fh->fh_dentry) {
1753 if (!(opdesc->op_flags & ALLOWED_WITHOUT_FH)) { 1734 if (!(op->opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
1754 op->status = nfserr_nofilehandle; 1735 op->status = nfserr_nofilehandle;
1755 goto encode_op; 1736 goto encode_op;
1756 } 1737 }
1757 } else if (current_fh->fh_export->ex_fslocs.migrated && 1738 } else if (current_fh->fh_export->ex_fslocs.migrated &&
1758 !(opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) { 1739 !(op->opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
1759 op->status = nfserr_moved; 1740 op->status = nfserr_moved;
1760 goto encode_op; 1741 goto encode_op;
1761 } 1742 }
@@ -1763,12 +1744,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
1763 fh_clear_wcc(current_fh); 1744 fh_clear_wcc(current_fh);
1764 1745
1765 /* If op is non-idempotent */ 1746 /* If op is non-idempotent */
1766 if (opdesc->op_flags & OP_MODIFIES_SOMETHING) { 1747 if (op->opdesc->op_flags & OP_MODIFIES_SOMETHING) {
1767 /* 1748 /*
1768 * Don't execute this op if we couldn't encode a 1749 * Don't execute this op if we couldn't encode a
1769 * succesful reply: 1750 * succesful reply:
1770 */ 1751 */
1771 u32 plen = opdesc->op_rsize_bop(rqstp, op); 1752 u32 plen = op->opdesc->op_rsize_bop(rqstp, op);
1772 /* 1753 /*
1773 * Plus if there's another operation, make sure 1754 * Plus if there's another operation, make sure
1774 * we'll have space to at least encode an error: 1755 * we'll have space to at least encode an error:
@@ -1781,9 +1762,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
1781 if (op->status) 1762 if (op->status)
1782 goto encode_op; 1763 goto encode_op;
1783 1764
1784 if (opdesc->op_get_currentstateid) 1765 if (op->opdesc->op_get_currentstateid)
1785 opdesc->op_get_currentstateid(cstate, &op->u); 1766 op->opdesc->op_get_currentstateid(cstate, &op->u);
1786 op->status = opdesc->op_func(rqstp, cstate, &op->u); 1767 op->status = op->opdesc->op_func(rqstp, cstate, &op->u);
1787 1768
1788 /* Only from SEQUENCE */ 1769 /* Only from SEQUENCE */
1789 if (cstate->status == nfserr_replay_cache) { 1770 if (cstate->status == nfserr_replay_cache) {
@@ -1792,10 +1773,10 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
1792 goto out; 1773 goto out;
1793 } 1774 }
1794 if (!op->status) { 1775 if (!op->status) {
1795 if (opdesc->op_set_currentstateid) 1776 if (op->opdesc->op_set_currentstateid)
1796 opdesc->op_set_currentstateid(cstate, &op->u); 1777 op->opdesc->op_set_currentstateid(cstate, &op->u);
1797 1778
1798 if (opdesc->op_flags & OP_CLEAR_STATEID) 1779 if (op->opdesc->op_flags & OP_CLEAR_STATEID)
1799 clear_current_stateid(cstate); 1780 clear_current_stateid(cstate);
1800 1781
1801 if (need_wrongsec_check(rqstp)) 1782 if (need_wrongsec_check(rqstp))
@@ -2160,13 +2141,15 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2160 }, 2141 },
2161 [OP_LOCK] = { 2142 [OP_LOCK] = {
2162 .op_func = nfsd4_lock, 2143 .op_func = nfsd4_lock,
2163 .op_flags = OP_MODIFIES_SOMETHING, 2144 .op_flags = OP_MODIFIES_SOMETHING |
2145 OP_NONTRIVIAL_ERROR_ENCODE,
2164 .op_name = "OP_LOCK", 2146 .op_name = "OP_LOCK",
2165 .op_rsize_bop = nfsd4_lock_rsize, 2147 .op_rsize_bop = nfsd4_lock_rsize,
2166 .op_set_currentstateid = nfsd4_set_lockstateid, 2148 .op_set_currentstateid = nfsd4_set_lockstateid,
2167 }, 2149 },
2168 [OP_LOCKT] = { 2150 [OP_LOCKT] = {
2169 .op_func = nfsd4_lockt, 2151 .op_func = nfsd4_lockt,
2152 .op_flags = OP_NONTRIVIAL_ERROR_ENCODE,
2170 .op_name = "OP_LOCKT", 2153 .op_name = "OP_LOCKT",
2171 .op_rsize_bop = nfsd4_lock_rsize, 2154 .op_rsize_bop = nfsd4_lock_rsize,
2172 }, 2155 },
@@ -2238,6 +2221,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2238 }, 2221 },
2239 [OP_READ] = { 2222 [OP_READ] = {
2240 .op_func = nfsd4_read, 2223 .op_func = nfsd4_read,
2224 .op_release = nfsd4_read_release,
2241 .op_name = "OP_READ", 2225 .op_name = "OP_READ",
2242 .op_rsize_bop = nfsd4_read_rsize, 2226 .op_rsize_bop = nfsd4_read_rsize,
2243 .op_get_currentstateid = nfsd4_get_readstateid, 2227 .op_get_currentstateid = nfsd4_get_readstateid,
@@ -2287,6 +2271,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2287 }, 2271 },
2288 [OP_SECINFO] = { 2272 [OP_SECINFO] = {
2289 .op_func = nfsd4_secinfo, 2273 .op_func = nfsd4_secinfo,
2274 .op_release = nfsd4_secinfo_release,
2290 .op_flags = OP_HANDLES_WRONGSEC, 2275 .op_flags = OP_HANDLES_WRONGSEC,
2291 .op_name = "OP_SECINFO", 2276 .op_name = "OP_SECINFO",
2292 .op_rsize_bop = nfsd4_secinfo_rsize, 2277 .op_rsize_bop = nfsd4_secinfo_rsize,
@@ -2294,14 +2279,16 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2294 [OP_SETATTR] = { 2279 [OP_SETATTR] = {
2295 .op_func = nfsd4_setattr, 2280 .op_func = nfsd4_setattr,
2296 .op_name = "OP_SETATTR", 2281 .op_name = "OP_SETATTR",
2297 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, 2282 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME
2283 | OP_NONTRIVIAL_ERROR_ENCODE,
2298 .op_rsize_bop = nfsd4_setattr_rsize, 2284 .op_rsize_bop = nfsd4_setattr_rsize,
2299 .op_get_currentstateid = nfsd4_get_setattrstateid, 2285 .op_get_currentstateid = nfsd4_get_setattrstateid,
2300 }, 2286 },
2301 [OP_SETCLIENTID] = { 2287 [OP_SETCLIENTID] = {
2302 .op_func = nfsd4_setclientid, 2288 .op_func = nfsd4_setclientid,
2303 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS 2289 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
2304 | OP_MODIFIES_SOMETHING | OP_CACHEME, 2290 | OP_MODIFIES_SOMETHING | OP_CACHEME
2291 | OP_NONTRIVIAL_ERROR_ENCODE,
2305 .op_name = "OP_SETCLIENTID", 2292 .op_name = "OP_SETCLIENTID",
2306 .op_rsize_bop = nfsd4_setclientid_rsize, 2293 .op_rsize_bop = nfsd4_setclientid_rsize,
2307 }, 2294 },
@@ -2388,6 +2375,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2388 }, 2375 },
2389 [OP_SECINFO_NO_NAME] = { 2376 [OP_SECINFO_NO_NAME] = {
2390 .op_func = nfsd4_secinfo_no_name, 2377 .op_func = nfsd4_secinfo_no_name,
2378 .op_release = nfsd4_secinfo_release,
2391 .op_flags = OP_HANDLES_WRONGSEC, 2379 .op_flags = OP_HANDLES_WRONGSEC,
2392 .op_name = "OP_SECINFO_NO_NAME", 2380 .op_name = "OP_SECINFO_NO_NAME",
2393 .op_rsize_bop = nfsd4_secinfo_rsize, 2381 .op_rsize_bop = nfsd4_secinfo_rsize,
@@ -2408,12 +2396,14 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2408#ifdef CONFIG_NFSD_PNFS 2396#ifdef CONFIG_NFSD_PNFS
2409 [OP_GETDEVICEINFO] = { 2397 [OP_GETDEVICEINFO] = {
2410 .op_func = nfsd4_getdeviceinfo, 2398 .op_func = nfsd4_getdeviceinfo,
2399 .op_release = nfsd4_getdeviceinfo_release,
2411 .op_flags = ALLOWED_WITHOUT_FH, 2400 .op_flags = ALLOWED_WITHOUT_FH,
2412 .op_name = "OP_GETDEVICEINFO", 2401 .op_name = "OP_GETDEVICEINFO",
2413 .op_rsize_bop = nfsd4_getdeviceinfo_rsize, 2402 .op_rsize_bop = nfsd4_getdeviceinfo_rsize,
2414 }, 2403 },
2415 [OP_LAYOUTGET] = { 2404 [OP_LAYOUTGET] = {
2416 .op_func = nfsd4_layoutget, 2405 .op_func = nfsd4_layoutget,
2406 .op_release = nfsd4_layoutget_release,
2417 .op_flags = OP_MODIFIES_SOMETHING, 2407 .op_flags = OP_MODIFIES_SOMETHING,
2418 .op_name = "OP_LAYOUTGET", 2408 .op_name = "OP_LAYOUTGET",
2419 .op_rsize_bop = nfsd4_layoutget_rsize, 2409 .op_rsize_bop = nfsd4_layoutget_rsize,
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 5f940d2a136b..2c61c6b8ae09 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -159,6 +159,25 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
159 */ 159 */
160 unsigned int avail = (char *)argp->end - (char *)argp->p; 160 unsigned int avail = (char *)argp->end - (char *)argp->p;
161 __be32 *p; 161 __be32 *p;
162
163 if (argp->pagelen == 0) {
164 struct kvec *vec = &argp->rqstp->rq_arg.tail[0];
165
166 if (!argp->tail) {
167 argp->tail = true;
168 avail = vec->iov_len;
169 argp->p = vec->iov_base;
170 argp->end = vec->iov_base + avail;
171 }
172
173 if (avail < nbytes)
174 return NULL;
175
176 p = argp->p;
177 argp->p += XDR_QUADLEN(nbytes);
178 return p;
179 }
180
162 if (avail + argp->pagelen < nbytes) 181 if (avail + argp->pagelen < nbytes)
163 return NULL; 182 return NULL;
164 if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */ 183 if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
@@ -1778,7 +1797,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
1778 1797
1779typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *); 1798typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
1780 1799
1781static nfsd4_dec nfsd4_dec_ops[] = { 1800static const nfsd4_dec nfsd4_dec_ops[] = {
1782 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access, 1801 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
1783 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close, 1802 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
1784 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit, 1803 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
@@ -1927,6 +1946,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
1927 op->opnum = OP_ILLEGAL; 1946 op->opnum = OP_ILLEGAL;
1928 op->status = nfserr_op_illegal; 1947 op->status = nfserr_op_illegal;
1929 } 1948 }
1949 op->opdesc = OPDESC(op);
1930 /* 1950 /*
1931 * We'll try to cache the result in the DRC if any one 1951 * We'll try to cache the result in the DRC if any one
1932 * op in the compound wants to be cached: 1952 * op in the compound wants to be cached:
@@ -3102,14 +3122,12 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
3102 struct xdr_stream *xdr = &resp->xdr; 3122 struct xdr_stream *xdr = &resp->xdr;
3103 __be32 *p; 3123 __be32 *p;
3104 3124
3105 if (!nfserr) { 3125 p = xdr_reserve_space(xdr, 8);
3106 p = xdr_reserve_space(xdr, 8); 3126 if (!p)
3107 if (!p) 3127 return nfserr_resource;
3108 return nfserr_resource; 3128 *p++ = cpu_to_be32(access->ac_supported);
3109 *p++ = cpu_to_be32(access->ac_supported); 3129 *p++ = cpu_to_be32(access->ac_resp_access);
3110 *p++ = cpu_to_be32(access->ac_resp_access); 3130 return 0;
3111 }
3112 return nfserr;
3113} 3131}
3114 3132
3115static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts) 3133static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
@@ -3117,17 +3135,15 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
3117 struct xdr_stream *xdr = &resp->xdr; 3135 struct xdr_stream *xdr = &resp->xdr;
3118 __be32 *p; 3136 __be32 *p;
3119 3137
3120 if (!nfserr) { 3138 p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
3121 p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8); 3139 if (!p)
3122 if (!p) 3140 return nfserr_resource;
3123 return nfserr_resource; 3141 p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
3124 p = xdr_encode_opaque_fixed(p, bcts->sessionid.data, 3142 NFS4_MAX_SESSIONID_LEN);
3125 NFS4_MAX_SESSIONID_LEN); 3143 *p++ = cpu_to_be32(bcts->dir);
3126 *p++ = cpu_to_be32(bcts->dir); 3144 /* Upshifting from TCP to RDMA is not supported */
3127 /* Upshifting from TCP to RDMA is not supported */ 3145 *p++ = cpu_to_be32(0);
3128 *p++ = cpu_to_be32(0); 3146 return 0;
3129 }
3130 return nfserr;
3131} 3147}
3132 3148
3133static __be32 3149static __be32
@@ -3135,10 +3151,7 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c
3135{ 3151{
3136 struct xdr_stream *xdr = &resp->xdr; 3152 struct xdr_stream *xdr = &resp->xdr;
3137 3153
3138 if (!nfserr) 3154 return nfsd4_encode_stateid(xdr, &close->cl_stateid);
3139 nfserr = nfsd4_encode_stateid(xdr, &close->cl_stateid);
3140
3141 return nfserr;
3142} 3155}
3143 3156
3144 3157
@@ -3148,14 +3161,12 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
3148 struct xdr_stream *xdr = &resp->xdr; 3161 struct xdr_stream *xdr = &resp->xdr;
3149 __be32 *p; 3162 __be32 *p;
3150 3163
3151 if (!nfserr) { 3164 p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
3152 p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE); 3165 if (!p)
3153 if (!p) 3166 return nfserr_resource;
3154 return nfserr_resource; 3167 p = xdr_encode_opaque_fixed(p, commit->co_verf.data,
3155 p = xdr_encode_opaque_fixed(p, commit->co_verf.data,
3156 NFS4_VERIFIER_SIZE); 3168 NFS4_VERIFIER_SIZE);
3157 } 3169 return 0;
3158 return nfserr;
3159} 3170}
3160 3171
3161static __be32 3172static __be32
@@ -3164,15 +3175,13 @@ nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
3164 struct xdr_stream *xdr = &resp->xdr; 3175 struct xdr_stream *xdr = &resp->xdr;
3165 __be32 *p; 3176 __be32 *p;
3166 3177
3167 if (!nfserr) { 3178 p = xdr_reserve_space(xdr, 20);
3168 p = xdr_reserve_space(xdr, 20); 3179 if (!p)
3169 if (!p) 3180 return nfserr_resource;
3170 return nfserr_resource; 3181 encode_cinfo(p, &create->cr_cinfo);
3171 encode_cinfo(p, &create->cr_cinfo); 3182 nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
3172 nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0], 3183 create->cr_bmval[1], create->cr_bmval[2]);
3173 create->cr_bmval[1], create->cr_bmval[2]); 3184 return 0;
3174 }
3175 return nfserr;
3176} 3185}
3177 3186
3178static __be32 3187static __be32
@@ -3181,13 +3190,8 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
3181 struct svc_fh *fhp = getattr->ga_fhp; 3190 struct svc_fh *fhp = getattr->ga_fhp;
3182 struct xdr_stream *xdr = &resp->xdr; 3191 struct xdr_stream *xdr = &resp->xdr;
3183 3192
3184 if (nfserr) 3193 return nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
3185 return nfserr; 3194 getattr->ga_bmval, resp->rqstp, 0);
3186
3187 nfserr = nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
3188 getattr->ga_bmval,
3189 resp->rqstp, 0);
3190 return nfserr;
3191} 3195}
3192 3196
3193static __be32 3197static __be32
@@ -3198,14 +3202,12 @@ nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh
3198 unsigned int len; 3202 unsigned int len;
3199 __be32 *p; 3203 __be32 *p;
3200 3204
3201 if (!nfserr) { 3205 len = fhp->fh_handle.fh_size;
3202 len = fhp->fh_handle.fh_size; 3206 p = xdr_reserve_space(xdr, len + 4);
3203 p = xdr_reserve_space(xdr, len + 4); 3207 if (!p)
3204 if (!p) 3208 return nfserr_resource;
3205 return nfserr_resource; 3209 p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base, len);
3206 p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base, len); 3210 return 0;
3207 }
3208 return nfserr;
3209} 3211}
3210 3212
3211/* 3213/*
@@ -3275,10 +3277,7 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
3275{ 3277{
3276 struct xdr_stream *xdr = &resp->xdr; 3278 struct xdr_stream *xdr = &resp->xdr;
3277 3279
3278 if (!nfserr) 3280 return nfsd4_encode_stateid(xdr, &locku->lu_stateid);
3279 nfserr = nfsd4_encode_stateid(xdr, &locku->lu_stateid);
3280
3281 return nfserr;
3282} 3281}
3283 3282
3284 3283
@@ -3288,13 +3287,11 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_li
3288 struct xdr_stream *xdr = &resp->xdr; 3287 struct xdr_stream *xdr = &resp->xdr;
3289 __be32 *p; 3288 __be32 *p;
3290 3289
3291 if (!nfserr) { 3290 p = xdr_reserve_space(xdr, 20);
3292 p = xdr_reserve_space(xdr, 20); 3291 if (!p)
3293 if (!p) 3292 return nfserr_resource;
3294 return nfserr_resource; 3293 p = encode_cinfo(p, &link->li_cinfo);
3295 p = encode_cinfo(p, &link->li_cinfo); 3294 return 0;
3296 }
3297 return nfserr;
3298} 3295}
3299 3296
3300 3297
@@ -3304,12 +3301,9 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
3304 struct xdr_stream *xdr = &resp->xdr; 3301 struct xdr_stream *xdr = &resp->xdr;
3305 __be32 *p; 3302 __be32 *p;
3306 3303
3307 if (nfserr)
3308 goto out;
3309
3310 nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid); 3304 nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
3311 if (nfserr) 3305 if (nfserr)
3312 goto out; 3306 return nfserr;
3313 p = xdr_reserve_space(xdr, 24); 3307 p = xdr_reserve_space(xdr, 24);
3314 if (!p) 3308 if (!p)
3315 return nfserr_resource; 3309 return nfserr_resource;
@@ -3319,7 +3313,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
3319 nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1], 3313 nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
3320 open->op_bmval[2]); 3314 open->op_bmval[2]);
3321 if (nfserr) 3315 if (nfserr)
3322 goto out; 3316 return nfserr;
3323 3317
3324 p = xdr_reserve_space(xdr, 4); 3318 p = xdr_reserve_space(xdr, 4);
3325 if (!p) 3319 if (!p)
@@ -3392,8 +3386,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
3392 BUG(); 3386 BUG();
3393 } 3387 }
3394 /* XXX save filehandle here */ 3388 /* XXX save filehandle here */
3395out: 3389 return 0;
3396 return nfserr;
3397} 3390}
3398 3391
3399static __be32 3392static __be32
@@ -3401,10 +3394,7 @@ nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct
3401{ 3394{
3402 struct xdr_stream *xdr = &resp->xdr; 3395 struct xdr_stream *xdr = &resp->xdr;
3403 3396
3404 if (!nfserr) 3397 return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
3405 nfserr = nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
3406
3407 return nfserr;
3408} 3398}
3409 3399
3410static __be32 3400static __be32
@@ -3412,10 +3402,7 @@ nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struc
3412{ 3402{
3413 struct xdr_stream *xdr = &resp->xdr; 3403 struct xdr_stream *xdr = &resp->xdr;
3414 3404
3415 if (!nfserr) 3405 return nfsd4_encode_stateid(xdr, &od->od_stateid);
3416 nfserr = nfsd4_encode_stateid(xdr, &od->od_stateid);
3417
3418 return nfserr;
3419} 3406}
3420 3407
3421static __be32 nfsd4_encode_splice_read( 3408static __be32 nfsd4_encode_splice_read(
@@ -3552,20 +3539,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
3552 struct raparms *ra = NULL; 3539 struct raparms *ra = NULL;
3553 __be32 *p; 3540 __be32 *p;
3554 3541
3555 if (nfserr)
3556 goto out;
3557
3558 p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */ 3542 p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
3559 if (!p) { 3543 if (!p) {
3560 WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)); 3544 WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
3561 nfserr = nfserr_resource; 3545 return nfserr_resource;
3562 goto out;
3563 } 3546 }
3564 if (resp->xdr.buf->page_len && 3547 if (resp->xdr.buf->page_len &&
3565 test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) { 3548 test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
3566 WARN_ON_ONCE(1); 3549 WARN_ON_ONCE(1);
3567 nfserr = nfserr_resource; 3550 return nfserr_resource;
3568 goto out;
3569 } 3551 }
3570 xdr_commit_encode(xdr); 3552 xdr_commit_encode(xdr);
3571 3553
@@ -3589,9 +3571,6 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
3589 if (nfserr) 3571 if (nfserr)
3590 xdr_truncate_encode(xdr, starting_len); 3572 xdr_truncate_encode(xdr, starting_len);
3591 3573
3592out:
3593 if (file)
3594 fput(file);
3595 return nfserr; 3574 return nfserr;
3596} 3575}
3597 3576
@@ -3605,9 +3584,6 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
3605 int length_offset = xdr->buf->len; 3584 int length_offset = xdr->buf->len;
3606 __be32 *p; 3585 __be32 *p;
3607 3586
3608 if (nfserr)
3609 return nfserr;
3610
3611 p = xdr_reserve_space(xdr, 4); 3587 p = xdr_reserve_space(xdr, 4);
3612 if (!p) 3588 if (!p)
3613 return nfserr_resource; 3589 return nfserr_resource;
@@ -3651,9 +3627,6 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
3651 int starting_len = xdr->buf->len; 3627 int starting_len = xdr->buf->len;
3652 __be32 *p; 3628 __be32 *p;
3653 3629
3654 if (nfserr)
3655 return nfserr;
3656
3657 p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE); 3630 p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
3658 if (!p) 3631 if (!p)
3659 return nfserr_resource; 3632 return nfserr_resource;
@@ -3739,13 +3712,11 @@ nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
3739 struct xdr_stream *xdr = &resp->xdr; 3712 struct xdr_stream *xdr = &resp->xdr;
3740 __be32 *p; 3713 __be32 *p;
3741 3714
3742 if (!nfserr) { 3715 p = xdr_reserve_space(xdr, 20);
3743 p = xdr_reserve_space(xdr, 20); 3716 if (!p)
3744 if (!p) 3717 return nfserr_resource;
3745 return nfserr_resource; 3718 p = encode_cinfo(p, &remove->rm_cinfo);
3746 p = encode_cinfo(p, &remove->rm_cinfo); 3719 return 0;
3747 }
3748 return nfserr;
3749} 3720}
3750 3721
3751static __be32 3722static __be32
@@ -3754,19 +3725,16 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
3754 struct xdr_stream *xdr = &resp->xdr; 3725 struct xdr_stream *xdr = &resp->xdr;
3755 __be32 *p; 3726 __be32 *p;
3756 3727
3757 if (!nfserr) { 3728 p = xdr_reserve_space(xdr, 40);
3758 p = xdr_reserve_space(xdr, 40); 3729 if (!p)
3759 if (!p) 3730 return nfserr_resource;
3760 return nfserr_resource; 3731 p = encode_cinfo(p, &rename->rn_sinfo);
3761 p = encode_cinfo(p, &rename->rn_sinfo); 3732 p = encode_cinfo(p, &rename->rn_tinfo);
3762 p = encode_cinfo(p, &rename->rn_tinfo); 3733 return 0;
3763 }
3764 return nfserr;
3765} 3734}
3766 3735
3767static __be32 3736static __be32
3768nfsd4_do_encode_secinfo(struct xdr_stream *xdr, 3737nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
3769 __be32 nfserr, struct svc_export *exp)
3770{ 3738{
3771 u32 i, nflavs, supported; 3739 u32 i, nflavs, supported;
3772 struct exp_flavor_info *flavs; 3740 struct exp_flavor_info *flavs;
@@ -3774,9 +3742,6 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
3774 __be32 *p, *flavorsp; 3742 __be32 *p, *flavorsp;
3775 static bool report = true; 3743 static bool report = true;
3776 3744
3777 if (nfserr)
3778 goto out;
3779 nfserr = nfserr_resource;
3780 if (exp->ex_nflavors) { 3745 if (exp->ex_nflavors) {
3781 flavs = exp->ex_flavors; 3746 flavs = exp->ex_flavors;
3782 nflavs = exp->ex_nflavors; 3747 nflavs = exp->ex_nflavors;
@@ -3800,7 +3765,7 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
3800 supported = 0; 3765 supported = 0;
3801 p = xdr_reserve_space(xdr, 4); 3766 p = xdr_reserve_space(xdr, 4);
3802 if (!p) 3767 if (!p)
3803 goto out; 3768 return nfserr_resource;
3804 flavorsp = p++; /* to be backfilled later */ 3769 flavorsp = p++; /* to be backfilled later */
3805 3770
3806 for (i = 0; i < nflavs; i++) { 3771 for (i = 0; i < nflavs; i++) {
@@ -3812,7 +3777,7 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
3812 p = xdr_reserve_space(xdr, 4 + 4 + 3777 p = xdr_reserve_space(xdr, 4 + 4 +
3813 XDR_LEN(info.oid.len) + 4 + 4); 3778 XDR_LEN(info.oid.len) + 4 + 4);
3814 if (!p) 3779 if (!p)
3815 goto out; 3780 return nfserr_resource;
3816 *p++ = cpu_to_be32(RPC_AUTH_GSS); 3781 *p++ = cpu_to_be32(RPC_AUTH_GSS);
3817 p = xdr_encode_opaque(p, info.oid.data, info.oid.len); 3782 p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
3818 *p++ = cpu_to_be32(info.qop); 3783 *p++ = cpu_to_be32(info.qop);
@@ -3821,7 +3786,7 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
3821 supported++; 3786 supported++;
3822 p = xdr_reserve_space(xdr, 4); 3787 p = xdr_reserve_space(xdr, 4);
3823 if (!p) 3788 if (!p)
3824 goto out; 3789 return nfserr_resource;
3825 *p++ = cpu_to_be32(pf); 3790 *p++ = cpu_to_be32(pf);
3826 } else { 3791 } else {
3827 if (report) 3792 if (report)
@@ -3833,11 +3798,7 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
3833 if (nflavs != supported) 3798 if (nflavs != supported)
3834 report = false; 3799 report = false;
3835 *flavorsp = htonl(supported); 3800 *flavorsp = htonl(supported);
3836 nfserr = 0; 3801 return 0;
3837out:
3838 if (exp)
3839 exp_put(exp);
3840 return nfserr;
3841} 3802}
3842 3803
3843static __be32 3804static __be32
@@ -3846,7 +3807,7 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
3846{ 3807{
3847 struct xdr_stream *xdr = &resp->xdr; 3808 struct xdr_stream *xdr = &resp->xdr;
3848 3809
3849 return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->si_exp); 3810 return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp);
3850} 3811}
3851 3812
3852static __be32 3813static __be32
@@ -3855,7 +3816,7 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
3855{ 3816{
3856 struct xdr_stream *xdr = &resp->xdr; 3817 struct xdr_stream *xdr = &resp->xdr;
3857 3818
3858 return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->sin_exp); 3819 return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
3859} 3820}
3860 3821
3861/* 3822/*
@@ -3916,16 +3877,14 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
3916 struct xdr_stream *xdr = &resp->xdr; 3877 struct xdr_stream *xdr = &resp->xdr;
3917 __be32 *p; 3878 __be32 *p;
3918 3879
3919 if (!nfserr) { 3880 p = xdr_reserve_space(xdr, 16);
3920 p = xdr_reserve_space(xdr, 16); 3881 if (!p)
3921 if (!p) 3882 return nfserr_resource;
3922 return nfserr_resource; 3883 *p++ = cpu_to_be32(write->wr_bytes_written);
3923 *p++ = cpu_to_be32(write->wr_bytes_written); 3884 *p++ = cpu_to_be32(write->wr_how_written);
3924 *p++ = cpu_to_be32(write->wr_how_written); 3885 p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
3925 p = xdr_encode_opaque_fixed(p, write->wr_verifier.data, 3886 NFS4_VERIFIER_SIZE);
3926 NFS4_VERIFIER_SIZE); 3887 return 0;
3927 }
3928 return nfserr;
3929} 3888}
3930 3889
3931static __be32 3890static __be32
@@ -3938,12 +3897,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
3938 char *server_scope; 3897 char *server_scope;
3939 int major_id_sz; 3898 int major_id_sz;
3940 int server_scope_sz; 3899 int server_scope_sz;
3941 int status = 0;
3942 uint64_t minor_id = 0; 3900 uint64_t minor_id = 0;
3943 3901
3944 if (nfserr)
3945 return nfserr;
3946
3947 major_id = utsname()->nodename; 3902 major_id = utsname()->nodename;
3948 major_id_sz = strlen(major_id); 3903 major_id_sz = strlen(major_id);
3949 server_scope = utsname()->nodename; 3904 server_scope = utsname()->nodename;
@@ -3968,19 +3923,19 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
3968 break; 3923 break;
3969 case SP4_MACH_CRED: 3924 case SP4_MACH_CRED:
3970 /* spo_must_enforce bitmap: */ 3925 /* spo_must_enforce bitmap: */
3971 status = nfsd4_encode_bitmap(xdr, 3926 nfserr = nfsd4_encode_bitmap(xdr,
3972 exid->spo_must_enforce[0], 3927 exid->spo_must_enforce[0],
3973 exid->spo_must_enforce[1], 3928 exid->spo_must_enforce[1],
3974 exid->spo_must_enforce[2]); 3929 exid->spo_must_enforce[2]);
3975 if (status) 3930 if (nfserr)
3976 goto out; 3931 return nfserr;
3977 /* spo_must_allow bitmap: */ 3932 /* spo_must_allow bitmap: */
3978 status = nfsd4_encode_bitmap(xdr, 3933 nfserr = nfsd4_encode_bitmap(xdr,
3979 exid->spo_must_allow[0], 3934 exid->spo_must_allow[0],
3980 exid->spo_must_allow[1], 3935 exid->spo_must_allow[1],
3981 exid->spo_must_allow[2]); 3936 exid->spo_must_allow[2]);
3982 if (status) 3937 if (nfserr)
3983 goto out; 3938 return nfserr;
3984 break; 3939 break;
3985 default: 3940 default:
3986 WARN_ON_ONCE(1); 3941 WARN_ON_ONCE(1);
@@ -4007,8 +3962,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
4007 /* Implementation id */ 3962 /* Implementation id */
4008 *p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */ 3963 *p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
4009 return 0; 3964 return 0;
4010out:
4011 return status;
4012} 3965}
4013 3966
4014static __be32 3967static __be32
@@ -4018,9 +3971,6 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
4018 struct xdr_stream *xdr = &resp->xdr; 3971 struct xdr_stream *xdr = &resp->xdr;
4019 __be32 *p; 3972 __be32 *p;
4020 3973
4021 if (nfserr)
4022 return nfserr;
4023
4024 p = xdr_reserve_space(xdr, 24); 3974 p = xdr_reserve_space(xdr, 24);
4025 if (!p) 3975 if (!p)
4026 return nfserr_resource; 3976 return nfserr_resource;
@@ -4074,9 +4024,6 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
4074 struct xdr_stream *xdr = &resp->xdr; 4024 struct xdr_stream *xdr = &resp->xdr;
4075 __be32 *p; 4025 __be32 *p;
4076 4026
4077 if (nfserr)
4078 return nfserr;
4079
4080 p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20); 4027 p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
4081 if (!p) 4028 if (!p)
4082 return nfserr_resource; 4029 return nfserr_resource;
@@ -4101,9 +4048,6 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
4101 struct nfsd4_test_stateid_id *stateid, *next; 4048 struct nfsd4_test_stateid_id *stateid, *next;
4102 __be32 *p; 4049 __be32 *p;
4103 4050
4104 if (nfserr)
4105 return nfserr;
4106
4107 p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids)); 4051 p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
4108 if (!p) 4052 if (!p)
4109 return nfserr_resource; 4053 return nfserr_resource;
@@ -4113,7 +4057,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
4113 *p++ = stateid->ts_id_status; 4057 *p++ = stateid->ts_id_status;
4114 } 4058 }
4115 4059
4116 return nfserr; 4060 return 0;
4117} 4061}
4118 4062
4119#ifdef CONFIG_NFSD_PNFS 4063#ifdef CONFIG_NFSD_PNFS
@@ -4126,14 +4070,9 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
4126 u32 starting_len = xdr->buf->len, needed_len; 4070 u32 starting_len = xdr->buf->len, needed_len;
4127 __be32 *p; 4071 __be32 *p;
4128 4072
4129 dprintk("%s: err %d\n", __func__, be32_to_cpu(nfserr));
4130 if (nfserr)
4131 goto out;
4132
4133 nfserr = nfserr_resource;
4134 p = xdr_reserve_space(xdr, 4); 4073 p = xdr_reserve_space(xdr, 4);
4135 if (!p) 4074 if (!p)
4136 goto out; 4075 return nfserr_resource;
4137 4076
4138 *p++ = cpu_to_be32(gdev->gd_layout_type); 4077 *p++ = cpu_to_be32(gdev->gd_layout_type);
4139 4078
@@ -4149,42 +4088,33 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
4149 */ 4088 */
4150 if (xdr->buf->len + 4 > gdev->gd_maxcount) 4089 if (xdr->buf->len + 4 > gdev->gd_maxcount)
4151 goto toosmall; 4090 goto toosmall;
4152 goto out; 4091 return nfserr;
4153 } 4092 }
4154 } 4093 }
4155 4094
4156 nfserr = nfserr_resource;
4157 if (gdev->gd_notify_types) { 4095 if (gdev->gd_notify_types) {
4158 p = xdr_reserve_space(xdr, 4 + 4); 4096 p = xdr_reserve_space(xdr, 4 + 4);
4159 if (!p) 4097 if (!p)
4160 goto out; 4098 return nfserr_resource;
4161 *p++ = cpu_to_be32(1); /* bitmap length */ 4099 *p++ = cpu_to_be32(1); /* bitmap length */
4162 *p++ = cpu_to_be32(gdev->gd_notify_types); 4100 *p++ = cpu_to_be32(gdev->gd_notify_types);
4163 } else { 4101 } else {
4164 p = xdr_reserve_space(xdr, 4); 4102 p = xdr_reserve_space(xdr, 4);
4165 if (!p) 4103 if (!p)
4166 goto out; 4104 return nfserr_resource;
4167 *p++ = 0; 4105 *p++ = 0;
4168 } 4106 }
4169 4107
4170 nfserr = 0; 4108 return 0;
4171out:
4172 kfree(gdev->gd_device);
4173 dprintk("%s: done: %d\n", __func__, be32_to_cpu(nfserr));
4174 return nfserr;
4175
4176toosmall: 4109toosmall:
4177 dprintk("%s: maxcount too small\n", __func__); 4110 dprintk("%s: maxcount too small\n", __func__);
4178 needed_len = xdr->buf->len + 4 /* notifications */; 4111 needed_len = xdr->buf->len + 4 /* notifications */;
4179 xdr_truncate_encode(xdr, starting_len); 4112 xdr_truncate_encode(xdr, starting_len);
4180 p = xdr_reserve_space(xdr, 4); 4113 p = xdr_reserve_space(xdr, 4);
4181 if (!p) { 4114 if (!p)
4182 nfserr = nfserr_resource; 4115 return nfserr_resource;
4183 } else { 4116 *p++ = cpu_to_be32(needed_len);
4184 *p++ = cpu_to_be32(needed_len); 4117 return nfserr_toosmall;
4185 nfserr = nfserr_toosmall;
4186 }
4187 goto out;
4188} 4118}
4189 4119
4190static __be32 4120static __be32
@@ -4195,14 +4125,9 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
4195 const struct nfsd4_layout_ops *ops; 4125 const struct nfsd4_layout_ops *ops;
4196 __be32 *p; 4126 __be32 *p;
4197 4127
4198 dprintk("%s: err %d\n", __func__, nfserr);
4199 if (nfserr)
4200 goto out;
4201
4202 nfserr = nfserr_resource;
4203 p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t)); 4128 p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
4204 if (!p) 4129 if (!p)
4205 goto out; 4130 return nfserr_resource;
4206 4131
4207 *p++ = cpu_to_be32(1); /* we always set return-on-close */ 4132 *p++ = cpu_to_be32(1); /* we always set return-on-close */
4208 *p++ = cpu_to_be32(lgp->lg_sid.si_generation); 4133 *p++ = cpu_to_be32(lgp->lg_sid.si_generation);
@@ -4216,10 +4141,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
4216 *p++ = cpu_to_be32(lgp->lg_layout_type); 4141 *p++ = cpu_to_be32(lgp->lg_layout_type);
4217 4142
4218 ops = nfsd4_layout_ops[lgp->lg_layout_type]; 4143 ops = nfsd4_layout_ops[lgp->lg_layout_type];
4219 nfserr = ops->encode_layoutget(xdr, lgp); 4144 return ops->encode_layoutget(xdr, lgp);
4220out:
4221 kfree(lgp->lg_content);
4222 return nfserr;
4223} 4145}
4224 4146
4225static __be32 4147static __be32
@@ -4229,9 +4151,6 @@ nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
4229 struct xdr_stream *xdr = &resp->xdr; 4151 struct xdr_stream *xdr = &resp->xdr;
4230 __be32 *p; 4152 __be32 *p;
4231 4153
4232 if (nfserr)
4233 return nfserr;
4234
4235 p = xdr_reserve_space(xdr, 4); 4154 p = xdr_reserve_space(xdr, 4);
4236 if (!p) 4155 if (!p)
4237 return nfserr_resource; 4156 return nfserr_resource;
@@ -4243,7 +4162,7 @@ nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
4243 p = xdr_encode_hyper(p, lcp->lc_newsize); 4162 p = xdr_encode_hyper(p, lcp->lc_newsize);
4244 } 4163 }
4245 4164
4246 return nfs_ok; 4165 return 0;
4247} 4166}
4248 4167
4249static __be32 4168static __be32
@@ -4253,16 +4172,13 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
4253 struct xdr_stream *xdr = &resp->xdr; 4172 struct xdr_stream *xdr = &resp->xdr;
4254 __be32 *p; 4173 __be32 *p;
4255 4174
4256 if (nfserr)
4257 return nfserr;
4258
4259 p = xdr_reserve_space(xdr, 4); 4175 p = xdr_reserve_space(xdr, 4);
4260 if (!p) 4176 if (!p)
4261 return nfserr_resource; 4177 return nfserr_resource;
4262 *p++ = cpu_to_be32(lrp->lrs_present); 4178 *p++ = cpu_to_be32(lrp->lrs_present);
4263 if (lrp->lrs_present) 4179 if (lrp->lrs_present)
4264 return nfsd4_encode_stateid(xdr, &lrp->lr_sid); 4180 return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
4265 return nfs_ok; 4181 return 0;
4266} 4182}
4267#endif /* CONFIG_NFSD_PNFS */ 4183#endif /* CONFIG_NFSD_PNFS */
4268 4184
@@ -4289,16 +4205,14 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
4289{ 4205{
4290 __be32 *p; 4206 __be32 *p;
4291 4207
4292 if (!nfserr) { 4208 nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
4293 nfserr = nfsd42_encode_write_res(resp, &copy->cp_res); 4209 if (nfserr)
4294 if (nfserr) 4210 return nfserr;
4295 return nfserr;
4296 4211
4297 p = xdr_reserve_space(&resp->xdr, 4 + 4); 4212 p = xdr_reserve_space(&resp->xdr, 4 + 4);
4298 *p++ = cpu_to_be32(copy->cp_consecutive); 4213 *p++ = cpu_to_be32(copy->cp_consecutive);
4299 *p++ = cpu_to_be32(copy->cp_synchronous); 4214 *p++ = cpu_to_be32(copy->cp_synchronous);
4300 } 4215 return 0;
4301 return nfserr;
4302} 4216}
4303 4217
4304static __be32 4218static __be32
@@ -4307,14 +4221,11 @@ nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
4307{ 4221{
4308 __be32 *p; 4222 __be32 *p;
4309 4223
4310 if (nfserr)
4311 return nfserr;
4312
4313 p = xdr_reserve_space(&resp->xdr, 4 + 8); 4224 p = xdr_reserve_space(&resp->xdr, 4 + 8);
4314 *p++ = cpu_to_be32(seek->seek_eof); 4225 *p++ = cpu_to_be32(seek->seek_eof);
4315 p = xdr_encode_hyper(p, seek->seek_pos); 4226 p = xdr_encode_hyper(p, seek->seek_pos);
4316 4227
4317 return nfserr; 4228 return 0;
4318} 4229}
4319 4230
4320static __be32 4231static __be32
@@ -4330,7 +4241,7 @@ typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
4330 * since we don't need to filter out obsolete ops as this is 4241 * since we don't need to filter out obsolete ops as this is
4331 * done in the decoding phase. 4242 * done in the decoding phase.
4332 */ 4243 */
4333static nfsd4_enc nfsd4_enc_ops[] = { 4244static const nfsd4_enc nfsd4_enc_ops[] = {
4334 [OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access, 4245 [OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access,
4335 [OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close, 4246 [OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close,
4336 [OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit, 4247 [OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit,
@@ -4449,6 +4360,7 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
4449 struct xdr_stream *xdr = &resp->xdr; 4360 struct xdr_stream *xdr = &resp->xdr;
4450 struct nfs4_stateowner *so = resp->cstate.replay_owner; 4361 struct nfs4_stateowner *so = resp->cstate.replay_owner;
4451 struct svc_rqst *rqstp = resp->rqstp; 4362 struct svc_rqst *rqstp = resp->rqstp;
4363 const struct nfsd4_operation *opdesc = op->opdesc;
4452 int post_err_offset; 4364 int post_err_offset;
4453 nfsd4_enc encoder; 4365 nfsd4_enc encoder;
4454 __be32 *p; 4366 __be32 *p;
@@ -4463,10 +4375,15 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
4463 4375
4464 if (op->opnum == OP_ILLEGAL) 4376 if (op->opnum == OP_ILLEGAL)
4465 goto status; 4377 goto status;
4378 if (op->status && opdesc &&
4379 !(opdesc->op_flags & OP_NONTRIVIAL_ERROR_ENCODE))
4380 goto status;
4466 BUG_ON(op->opnum < 0 || op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) || 4381 BUG_ON(op->opnum < 0 || op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) ||
4467 !nfsd4_enc_ops[op->opnum]); 4382 !nfsd4_enc_ops[op->opnum]);
4468 encoder = nfsd4_enc_ops[op->opnum]; 4383 encoder = nfsd4_enc_ops[op->opnum];
4469 op->status = encoder(resp, op->status, &op->u); 4384 op->status = encoder(resp, op->status, &op->u);
4385 if (opdesc && opdesc->op_release)
4386 opdesc->op_release(&op->u);
4470 xdr_commit_encode(xdr); 4387 xdr_commit_encode(xdr);
4471 4388
4472 /* nfsd4_check_resp_size guarantees enough room for error status */ 4389 /* nfsd4_check_resp_size guarantees enough room for error status */
@@ -4573,6 +4490,7 @@ nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p)
4573 args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len; 4490 args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len;
4574 args->pagelist = rqstp->rq_arg.pages; 4491 args->pagelist = rqstp->rq_arg.pages;
4575 args->pagelen = rqstp->rq_arg.page_len; 4492 args->pagelen = rqstp->rq_arg.page_len;
4493 args->tail = false;
4576 args->tmpp = NULL; 4494 args->tmpp = NULL;
4577 args->to_free = NULL; 4495 args->to_free = NULL;
4578 args->ops = args->iops; 4496 args->ops = args->iops;
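
Taken together, the nfs4xdr.c hunks above move error short-circuiting and resource cleanup out of the individual encoders: nfsd4_encode_operation() now skips the encoder whenever an op failed and its descriptor lacks OP_NONTRIVIAL_ERROR_ENCODE, and it calls the descriptor's ->op_release hook unconditionally afterwards, so encoders no longer need to fput()/kfree()/exp_put() on their own error paths. A condensed restatement of that check follows; the helper name is illustrative only, not from the patch.

/* Hypothetical helper paraphrasing the new test in nfsd4_encode_operation() */
static bool nfsd4_encoder_can_be_skipped(const struct nfsd4_op *op)
{
	const struct nfsd4_operation *opdesc = op->opdesc;

	/* a successful op always runs its encoder */
	if (!op->status)
		return false;
	/* a failed op encodes only its status word, unless it opts out */
	return opdesc && !(opdesc->op_flags & OP_NONTRIVIAL_ERROR_ENCODE);
}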
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 063ae7de2c12..7e3af3ef0917 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -475,7 +475,7 @@ static int nfsd_get_default_max_blksize(void)
475 return ret; 475 return ret;
476} 476}
477 477
478static struct svc_serv_ops nfsd_thread_sv_ops = { 478static const struct svc_serv_ops nfsd_thread_sv_ops = {
479 .svo_shutdown = nfsd_last_thread, 479 .svo_shutdown = nfsd_last_thread,
480 .svo_function = nfsd, 480 .svo_function = nfsd,
481 .svo_enqueue_xprt = svc_xprt_do_enqueue, 481 .svo_enqueue_xprt = svc_xprt_do_enqueue,
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 72c6ad136107..1e4edbf70052 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -538,6 +538,7 @@ struct nfsd4_seek {
538 538
539struct nfsd4_op { 539struct nfsd4_op {
540 int opnum; 540 int opnum;
541 const struct nfsd4_operation * opdesc;
541 __be32 status; 542 __be32 status;
542 union nfsd4_op_u { 543 union nfsd4_op_u {
543 struct nfsd4_access access; 544 struct nfsd4_access access;
@@ -614,6 +615,7 @@ struct nfsd4_compoundargs {
614 __be32 * end; 615 __be32 * end;
615 struct page ** pagelist; 616 struct page ** pagelist;
616 int pagelen; 617 int pagelen;
618 bool tail;
617 __be32 tmp[8]; 619 __be32 tmp[8];
618 __be32 * tmpp; 620 __be32 * tmpp;
619 struct svcxdr_tmpbuf *to_free; 621 struct svcxdr_tmpbuf *to_free;
@@ -661,6 +663,7 @@ static inline bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
661 return argp->opcnt == resp->opcnt; 663 return argp->opcnt == resp->opcnt;
662} 664}
663 665
666const struct nfsd4_operation *OPDESC(struct nfsd4_op *op);
664int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op); 667int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op);
665void warn_on_nonidempotent_op(struct nfsd4_op *op); 668void warn_on_nonidempotent_op(struct nfsd4_op *op);
666 669
@@ -748,6 +751,53 @@ extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp,
748 struct nfsd4_compound_state *, union nfsd4_op_u *); 751 struct nfsd4_compound_state *, union nfsd4_op_u *);
749extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr); 752extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr);
750 753
754enum nfsd4_op_flags {
755 ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
756 ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
757 ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */
758 /* For rfc 5661 section 2.6.3.1.1: */
759 OP_HANDLES_WRONGSEC = 1 << 3,
760 OP_IS_PUTFH_LIKE = 1 << 4,
761 /*
762 * These are the ops whose result size we estimate before
763 * encoding, to avoid performing an op then not being able to
764 * respond or cache a response. This includes writes and setattrs
765 * as well as the operations usually called "nonidempotent":
766 */
767 OP_MODIFIES_SOMETHING = 1 << 5,
768 /*
769 * Cache compounds containing these ops in the xid-based drc:
770 * We use the DRC for compounds containing non-idempotent
771 * operations, *except* those that are 4.1-specific (since
772 * sessions provide their own EOS), and except for stateful
773 * operations other than setclientid and setclientid_confirm
774 * (since sequence numbers provide EOS for open, lock, etc in
775 * the v4.0 case).
776 */
777 OP_CACHEME = 1 << 6,
778 /*
779 * These are ops which clear current state id.
780 */
781 OP_CLEAR_STATEID = 1 << 7,
782 /* Most ops return only an error on failure; some may do more: */
783 OP_NONTRIVIAL_ERROR_ENCODE = 1 << 8,
784};
785
786struct nfsd4_operation {
787 __be32 (*op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
788 union nfsd4_op_u *);
789 void (*op_release)(union nfsd4_op_u *);
790 u32 op_flags;
791 char *op_name;
792 /* Try to get response size before operation */
793 u32 (*op_rsize_bop)(struct svc_rqst *, struct nfsd4_op *);
794 void (*op_get_currentstateid)(struct nfsd4_compound_state *,
795 union nfsd4_op_u *);
796 void (*op_set_currentstateid)(struct nfsd4_compound_state *,
797 union nfsd4_op_u *);
798};
799
800
751#endif 801#endif
752 802
753/* 803/*
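
With struct nfsd4_operation and its flags now visible in xdr4.h, an ops-table entry ties the handler, the new ->op_release hook and the flags together in one initializer. The entry below simply mirrors the OP_READ slot from the nfs4proc.c hunk earlier in this patch, reproduced here for illustration rather than as a new change.

	[OP_READ] = {
		.op_func = nfsd4_read,
		.op_release = nfsd4_read_release,	/* fput()s rd_filp once encoding is done */
		.op_name = "OP_READ",
		.op_rsize_bop = nfsd4_read_rsize,
		.op_get_currentstateid = nfsd4_get_readstateid,
	},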
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index a3f8af9bd543..38f561b2dda3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@ struct svc_serv {
99 99
100 unsigned int sv_nrpools; /* number of thread pools */ 100 unsigned int sv_nrpools; /* number of thread pools */
101 struct svc_pool * sv_pools; /* array of thread pools */ 101 struct svc_pool * sv_pools; /* array of thread pools */
102 struct svc_serv_ops *sv_ops; /* server operations */ 102 const struct svc_serv_ops *sv_ops; /* server operations */
103#if defined(CONFIG_SUNRPC_BACKCHANNEL) 103#if defined(CONFIG_SUNRPC_BACKCHANNEL)
104 struct list_head sv_cb_list; /* queue for callback requests 104 struct list_head sv_cb_list; /* queue for callback requests
105 * that arrive over the same 105 * that arrive over the same
@@ -465,7 +465,7 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
465void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); 465void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
466int svc_bind(struct svc_serv *serv, struct net *net); 466int svc_bind(struct svc_serv *serv, struct net *net);
467struct svc_serv *svc_create(struct svc_program *, unsigned int, 467struct svc_serv *svc_create(struct svc_program *, unsigned int,
468 struct svc_serv_ops *); 468 const struct svc_serv_ops *);
469struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, 469struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
470 struct svc_pool *pool, int node); 470 struct svc_pool *pool, int node);
471struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 471struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
@@ -475,7 +475,7 @@ void svc_exit_thread(struct svc_rqst *);
475unsigned int svc_pool_map_get(void); 475unsigned int svc_pool_map_get(void);
476void svc_pool_map_put(void); 476void svc_pool_map_put(void);
477struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, 477struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
478 struct svc_serv_ops *); 478 const struct svc_serv_ops *);
479int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); 479int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
480int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int); 480int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
481int svc_pool_stats_open(struct svc_serv *serv, struct file *file); 481int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ddb7f94a9d06..6a2ad38f5458 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -31,7 +31,7 @@ struct svc_xprt_ops {
31struct svc_xprt_class { 31struct svc_xprt_class {
32 const char *xcl_name; 32 const char *xcl_name;
33 struct module *xcl_owner; 33 struct module *xcl_owner;
34 struct svc_xprt_ops *xcl_ops; 34 const struct svc_xprt_ops *xcl_ops;
35 struct list_head xcl_list; 35 struct list_head xcl_list;
36 u32 xcl_max_payload; 36 u32 xcl_max_payload;
37 int xcl_ident; 37 int xcl_ident;
@@ -49,7 +49,7 @@ struct svc_xpt_user {
49 49
50struct svc_xprt { 50struct svc_xprt {
51 struct svc_xprt_class *xpt_class; 51 struct svc_xprt_class *xpt_class;
52 struct svc_xprt_ops *xpt_ops; 52 const struct svc_xprt_ops *xpt_ops;
53 struct kref xpt_ref; 53 struct kref xpt_ref;
54 struct list_head xpt_list; 54 struct list_head xpt_list;
55 struct list_head xpt_ready; 55 struct list_head xpt_ready;
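Similarly, a transport implementation can keep its svc_xprt_ops in rodata and point xcl_ops at it. This sketch uses made-up names (my_xpo_create, my_xprt_ops, my_xprt_class) and fills in only the fields visible in the hunk above.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

/* Stub xpo_create; a real transport would build and return an svc_xprt. */
static struct svc_xprt *my_xpo_create(struct svc_serv *serv, struct net *net,
				      struct sockaddr *sa, int salen, int flags)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static const struct svc_xprt_ops my_xprt_ops = {
	.xpo_create	= my_xpo_create,
};

static struct svc_xprt_class my_xprt_class = {
	.xcl_name	 = "example",
	.xcl_owner	 = THIS_MODULE,
	.xcl_ops	 = &my_xprt_ops,	/* const pointer per the hunk above */
	.xcl_max_payload = RPCSVC_MAXPAYLOAD,
};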
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index 377d865e506d..a3cbbc7b6417 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -81,6 +81,8 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
81int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 81int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
82 struct ib_cqe *cqe, struct ib_send_wr *chain_wr); 82 struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
83 83
84unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
85 unsigned int maxpages);
84void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr); 86void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
85int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr); 87int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
86void rdma_rw_cleanup_mrs(struct ib_qp *qp); 88void rdma_rw_cleanup_mrs(struct ib_qp *qp);
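A hedged sketch of how a caller can use the newly exported rdma_rw_mr_factor() to size its RDMA Read/Write resources; estimate_rw_ctxs() and its parameters are illustrative, but the arithmetic mirrors what svc_rdma_accept() does later in this patch (ctxts = factor * max_requests).

#include <rdma/rw.h>

/* Number of rdma_rw contexts needed: MRs per maximum-sized payload,
 * multiplied by the number of RPCs that may be in flight at once.
 */
static unsigned int estimate_rw_ctxs(struct ib_device *dev, u8 port_num,
				     unsigned int max_payload_pages,
				     unsigned int max_requests)
{
	return rdma_rw_mr_factor(dev, port_num, max_payload_pages) *
	       max_requests;
}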
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 85ce0db5b0a6..aa04666f929d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -421,7 +421,7 @@ __svc_init_bc(struct svc_serv *serv)
421 */ 421 */
422static struct svc_serv * 422static struct svc_serv *
423__svc_create(struct svc_program *prog, unsigned int bufsize, int npools, 423__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
424 struct svc_serv_ops *ops) 424 const struct svc_serv_ops *ops)
425{ 425{
426 struct svc_serv *serv; 426 struct svc_serv *serv;
427 unsigned int vers; 427 unsigned int vers;
@@ -486,7 +486,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
486 486
487struct svc_serv * 487struct svc_serv *
488svc_create(struct svc_program *prog, unsigned int bufsize, 488svc_create(struct svc_program *prog, unsigned int bufsize,
489 struct svc_serv_ops *ops) 489 const struct svc_serv_ops *ops)
490{ 490{
491 return __svc_create(prog, bufsize, /*npools*/1, ops); 491 return __svc_create(prog, bufsize, /*npools*/1, ops);
492} 492}
@@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(svc_create);
494 494
495struct svc_serv * 495struct svc_serv *
496svc_create_pooled(struct svc_program *prog, unsigned int bufsize, 496svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
497 struct svc_serv_ops *ops) 497 const struct svc_serv_ops *ops)
498{ 498{
499 struct svc_serv *serv; 499 struct svc_serv *serv;
500 unsigned int npools = svc_pool_map_get(); 500 unsigned int npools = svc_pool_map_get();
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e18500151236..399fab5d1936 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -693,7 +693,7 @@ static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
693 return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags); 693 return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
694} 694}
695 695
696static struct svc_xprt_ops svc_udp_ops = { 696static const struct svc_xprt_ops svc_udp_ops = {
697 .xpo_create = svc_udp_create, 697 .xpo_create = svc_udp_create,
698 .xpo_recvfrom = svc_udp_recvfrom, 698 .xpo_recvfrom = svc_udp_recvfrom,
699 .xpo_sendto = svc_udp_sendto, 699 .xpo_sendto = svc_udp_sendto,
@@ -1241,7 +1241,7 @@ static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
1241{ 1241{
1242} 1242}
1243 1243
1244static struct svc_xprt_ops svc_tcp_bc_ops = { 1244static const struct svc_xprt_ops svc_tcp_bc_ops = {
1245 .xpo_create = svc_bc_tcp_create, 1245 .xpo_create = svc_bc_tcp_create,
1246 .xpo_detach = svc_bc_tcp_sock_detach, 1246 .xpo_detach = svc_bc_tcp_sock_detach,
1247 .xpo_free = svc_bc_sock_free, 1247 .xpo_free = svc_bc_sock_free,
@@ -1275,7 +1275,7 @@ static void svc_cleanup_bc_xprt_sock(void)
1275} 1275}
1276#endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1276#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1277 1277
1278static struct svc_xprt_ops svc_tcp_ops = { 1278static const struct svc_xprt_ops svc_tcp_ops = {
1279 .xpo_create = svc_tcp_create, 1279 .xpo_create = svc_tcp_create,
1280 .xpo_recvfrom = svc_tcp_recvfrom, 1280 .xpo_recvfrom = svc_tcp_recvfrom,
1281 .xpo_sendto = svc_tcp_sendto, 1281 .xpo_sendto = svc_tcp_sendto,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 933f79bed270..7dcda4597057 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -660,19 +660,21 @@ out_initerr:
660 return -EIO; 660 return -EIO;
661} 661}
662 662
663/* Walk the segments in the Read chunk starting at @p and construct
664 * RDMA Read operations to pull the chunk to the server.
665 */
663static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, 666static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
664 struct svc_rdma_read_info *info, 667 struct svc_rdma_read_info *info,
665 __be32 *p) 668 __be32 *p)
666{ 669{
667 int ret; 670 int ret;
668 671
672 ret = -EINVAL;
669 info->ri_chunklen = 0; 673 info->ri_chunklen = 0;
670 while (*p++ != xdr_zero) { 674 while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
671 u32 rs_handle, rs_length; 675 u32 rs_handle, rs_length;
672 u64 rs_offset; 676 u64 rs_offset;
673 677
674 if (be32_to_cpup(p++) != info->ri_position)
675 break;
676 rs_handle = be32_to_cpup(p++); 678 rs_handle = be32_to_cpup(p++);
677 rs_length = be32_to_cpup(p++); 679 rs_length = be32_to_cpup(p++);
678 p = xdr_decode_hyper(p, &rs_offset); 680 p = xdr_decode_hyper(p, &rs_offset);
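For reference, each entry consumed by the loop above is one Read segment: a Position word followed by an RDMA segment (handle, length, 64-bit offset). A stand-alone decoding sketch, with struct and helper names that are illustrative rather than taken from this patch:

#include <linux/types.h>
#include <linux/sunrpc/xdr.h>

/* One Read segment as it appears after the list discriminator. */
struct read_segment {
	u32 position;
	u32 handle;
	u32 length;
	u64 offset;
};

/* Decode one segment and return the advanced XDR pointer. */
static __be32 *decode_read_segment(__be32 *p, struct read_segment *seg)
{
	seg->position = be32_to_cpup(p++);
	seg->handle   = be32_to_cpup(p++);
	seg->length   = be32_to_cpup(p++);
	return xdr_decode_hyper(p, &seg->offset);
}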
@@ -689,78 +691,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
689 return ret; 691 return ret;
690} 692}
691 693
692/* If there is inline content following the Read chunk, append it to
693 * the page list immediately following the data payload. This has to
694 * be done after the reader function has determined how many pages
695 * were consumed for RDMA Read.
696 *
697 * On entry, ri_pageno and ri_pageoff point directly to the end of the
698 * page list. On exit, both have been updated to the new "next byte".
699 *
700 * Assumptions:
701 * - Inline content fits entirely in rq_pages[0]
702 * - Trailing content is only a handful of bytes
703 */
704static int svc_rdma_copy_tail(struct svc_rqst *rqstp,
705 struct svc_rdma_read_info *info)
706{
707 struct svc_rdma_op_ctxt *head = info->ri_readctxt;
708 unsigned int tail_length, remaining;
709 u8 *srcp, *destp;
710
711 /* Assert that all inline content fits in page 0. This is an
712 * implementation limit, not a protocol limit.
713 */
714 if (head->arg.head[0].iov_len > PAGE_SIZE) {
715 pr_warn_once("svcrdma: too much trailing inline content\n");
716 return -EINVAL;
717 }
718
719 srcp = head->arg.head[0].iov_base;
720 srcp += info->ri_position;
721 tail_length = head->arg.head[0].iov_len - info->ri_position;
722 remaining = tail_length;
723
724 /* If there is room on the last page in the page list, try to
725 * fit the trailing content there.
726 */
727 if (info->ri_pageoff > 0) {
728 unsigned int len;
729
730 len = min_t(unsigned int, remaining,
731 PAGE_SIZE - info->ri_pageoff);
732 destp = page_address(rqstp->rq_pages[info->ri_pageno]);
733 destp += info->ri_pageoff;
734
735 memcpy(destp, srcp, len);
736 srcp += len;
737 destp += len;
738 info->ri_pageoff += len;
739 remaining -= len;
740
741 if (info->ri_pageoff == PAGE_SIZE) {
742 info->ri_pageno++;
743 info->ri_pageoff = 0;
744 }
745 }
746
747 /* Otherwise, a fresh page is needed. */
748 if (remaining) {
749 head->arg.pages[info->ri_pageno] =
750 rqstp->rq_pages[info->ri_pageno];
751 head->count++;
752
753 destp = page_address(rqstp->rq_pages[info->ri_pageno]);
754 memcpy(destp, srcp, remaining);
755 info->ri_pageoff += remaining;
756 }
757
758 head->arg.page_len += tail_length;
759 head->arg.len += tail_length;
760 head->arg.buflen += tail_length;
761 return 0;
762}
763
764/* Construct RDMA Reads to pull over a normal Read chunk. The chunk 694/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
765 * data lands in the page list of head->arg.pages. 695 * data lands in the page list of head->arg.pages.
766 * 696 *
@@ -785,34 +715,28 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
785 if (ret < 0) 715 if (ret < 0)
786 goto out; 716 goto out;
787 717
788 /* Read chunk may need XDR round-up (see RFC 5666, s. 3.7). 718 /* Split the Receive buffer between the head and tail
719 * buffers at Read chunk's position. XDR roundup of the
720 * chunk is not included in either the pagelist or in
721 * the tail.
789 */ 722 */
790 if (info->ri_chunklen & 3) { 723 head->arg.tail[0].iov_base =
791 u32 padlen = 4 - (info->ri_chunklen & 3); 724 head->arg.head[0].iov_base + info->ri_position;
792 725 head->arg.tail[0].iov_len =
793 info->ri_chunklen += padlen; 726 head->arg.head[0].iov_len - info->ri_position;
727 head->arg.head[0].iov_len = info->ri_position;
794 728
795 /* NB: data payload always starts on XDR alignment, 729 /* Read chunk may need XDR roundup (see RFC 5666, s. 3.7).
796 * thus the pad can never contain a page boundary. 730 *
797 */ 731 * NFSv2/3 write decoders need the length of the tail to
798 info->ri_pageoff += padlen; 732 * contain the size of the roundup padding.
799 if (info->ri_pageoff == PAGE_SIZE) { 733 */
800 info->ri_pageno++; 734 head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3);
801 info->ri_pageoff = 0;
802 }
803 }
804 735
805 head->arg.page_len = info->ri_chunklen; 736 head->arg.page_len = info->ri_chunklen;
806 head->arg.len += info->ri_chunklen; 737 head->arg.len += info->ri_chunklen;
807 head->arg.buflen += info->ri_chunklen; 738 head->arg.buflen += info->ri_chunklen;
808 739
809 if (info->ri_position < head->arg.head[0].iov_len) {
810 ret = svc_rdma_copy_tail(rqstp, info);
811 if (ret < 0)
812 goto out;
813 }
814 head->arg.head[0].iov_len = info->ri_position;
815
816out: 740out:
817 return ret; 741 return ret;
818} 742}
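To make the head/tail split in svc_rdma_build_normal_read_chunk() concrete, here is a self-contained sketch of the same arithmetic on plain kvecs; the function name and the numbers in the lead-in are illustrative. With a 120-byte inline Receive buffer, a Read chunk Position of 96 and ri_chunklen of 1001, the head keeps 96 bytes, the tail covers the remaining 24 bytes, and 3 bytes of XDR roundup are counted in the tail length.

#include <linux/types.h>
#include <linux/uio.h>

/* Illustrative only: split an inline receive buffer at the chunk Position,
 * the way the hunk above splits head->arg, and count the XDR roundup
 * padding in the tail (as the NFSv2/3 write decoders expect).
 */
static void split_inline_at_position(struct kvec *head, struct kvec *tail,
				     u32 position, u32 chunklen)
{
	tail->iov_base = (u8 *)head->iov_base + position;
	tail->iov_len  = head->iov_len - position;
	head->iov_len  = position;

	tail->iov_len += 4 - (chunklen & 3);
}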
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e660d4965b18..5caf8e722a11 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -51,6 +51,7 @@
51#include <linux/workqueue.h> 51#include <linux/workqueue.h>
52#include <rdma/ib_verbs.h> 52#include <rdma/ib_verbs.h>
53#include <rdma/rdma_cm.h> 53#include <rdma/rdma_cm.h>
54#include <rdma/rw.h>
54#include <linux/sunrpc/svc_rdma.h> 55#include <linux/sunrpc/svc_rdma.h>
55#include <linux/export.h> 56#include <linux/export.h>
56#include "xprt_rdma.h" 57#include "xprt_rdma.h"
@@ -70,7 +71,7 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt);
70static int svc_rdma_secure_port(struct svc_rqst *); 71static int svc_rdma_secure_port(struct svc_rqst *);
71static void svc_rdma_kill_temp_xprt(struct svc_xprt *); 72static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
72 73
73static struct svc_xprt_ops svc_rdma_ops = { 74static const struct svc_xprt_ops svc_rdma_ops = {
74 .xpo_create = svc_rdma_create, 75 .xpo_create = svc_rdma_create,
75 .xpo_recvfrom = svc_rdma_recvfrom, 76 .xpo_recvfrom = svc_rdma_recvfrom,
76 .xpo_sendto = svc_rdma_sendto, 77 .xpo_sendto = svc_rdma_sendto,
@@ -98,7 +99,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
98static void svc_rdma_bc_detach(struct svc_xprt *); 99static void svc_rdma_bc_detach(struct svc_xprt *);
99static void svc_rdma_bc_free(struct svc_xprt *); 100static void svc_rdma_bc_free(struct svc_xprt *);
100 101
101static struct svc_xprt_ops svc_rdma_bc_ops = { 102static const struct svc_xprt_ops svc_rdma_bc_ops = {
102 .xpo_create = svc_rdma_bc_create, 103 .xpo_create = svc_rdma_bc_create,
103 .xpo_detach = svc_rdma_bc_detach, 104 .xpo_detach = svc_rdma_bc_detach,
104 .xpo_free = svc_rdma_bc_free, 105 .xpo_free = svc_rdma_bc_free,
@@ -167,8 +168,8 @@ static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
167{ 168{
168 unsigned int i; 169 unsigned int i;
169 170
170 /* Each RPC/RDMA credit can consume a number of send 171 /* Each RPC/RDMA credit can consume one Receive and
171 * and receive WQEs. One ctxt is allocated for each. 172 * one Send WQE at the same time.
172 */ 173 */
173 i = xprt->sc_sq_depth + xprt->sc_rq_depth; 174 i = xprt->sc_sq_depth + xprt->sc_rq_depth;
174 175
@@ -713,7 +714,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
713 struct ib_qp_init_attr qp_attr; 714 struct ib_qp_init_attr qp_attr;
714 struct ib_device *dev; 715 struct ib_device *dev;
715 struct sockaddr *sap; 716 struct sockaddr *sap;
716 unsigned int i; 717 unsigned int i, ctxts;
717 int ret = 0; 718 int ret = 0;
718 719
719 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); 720 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
@@ -742,14 +743,26 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
742 newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge, 743 newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
743 (size_t)RPCSVC_MAXPAGES); 744 (size_t)RPCSVC_MAXPAGES);
744 newxprt->sc_max_req_size = svcrdma_max_req_size; 745 newxprt->sc_max_req_size = svcrdma_max_req_size;
745 newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr, 746 newxprt->sc_max_requests = svcrdma_max_requests;
746 svcrdma_max_requests); 747 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
747 newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
748 newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
749 svcrdma_max_bc_requests);
750 newxprt->sc_rq_depth = newxprt->sc_max_requests + 748 newxprt->sc_rq_depth = newxprt->sc_max_requests +
751 newxprt->sc_max_bc_requests; 749 newxprt->sc_max_bc_requests;
752 newxprt->sc_sq_depth = newxprt->sc_rq_depth; 750 if (newxprt->sc_rq_depth > dev->attrs.max_qp_wr) {
751 pr_warn("svcrdma: reducing receive depth to %d\n",
752 dev->attrs.max_qp_wr);
753 newxprt->sc_rq_depth = dev->attrs.max_qp_wr;
754 newxprt->sc_max_requests = newxprt->sc_rq_depth - 2;
755 newxprt->sc_max_bc_requests = 2;
756 }
757 newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
758 ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
759 ctxts *= newxprt->sc_max_requests;
760 newxprt->sc_sq_depth = newxprt->sc_rq_depth + ctxts;
761 if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
762 pr_warn("svcrdma: reducing send depth to %d\n",
763 dev->attrs.max_qp_wr);
764 newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
765 }
753 atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth); 766 atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
754 767
755 if (!svc_rdma_prealloc_ctxts(newxprt)) 768 if (!svc_rdma_prealloc_ctxts(newxprt))
@@ -784,8 +797,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
784 qp_attr.event_handler = qp_event_handler; 797 qp_attr.event_handler = qp_event_handler;
785 qp_attr.qp_context = &newxprt->sc_xprt; 798 qp_attr.qp_context = &newxprt->sc_xprt;
786 qp_attr.port_num = newxprt->sc_port_num; 799 qp_attr.port_num = newxprt->sc_port_num;
787 qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests; 800 qp_attr.cap.max_rdma_ctxs = ctxts;
788 qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; 801 qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
789 qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth; 802 qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
790 qp_attr.cap.max_send_sge = newxprt->sc_max_sge; 803 qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
791 qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; 804 qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
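A worked example of the depth estimates in the hunk above, using made-up numbers: 32 credits, 2 backchannel credits, and an MR factor of 8 give sc_rq_depth = 32 + 2 = 34, ctxts = 8 * 32 = 256 and sc_sq_depth = 34 + 256 = 290; the QP is then created with max_rdma_ctxs = 256, max_send_wr = 290 - 256 = 34 and max_recv_wr = 34, with both depths still clamped to dev->attrs.max_qp_wr as shown. The helper below merely restates that arithmetic and is not part of the patch.

/* Illustrative restatement of the Send/Receive depth estimate above. */
static void estimate_depths(unsigned int max_requests,
			    unsigned int max_bc_requests,
			    unsigned int mr_factor,
			    unsigned int *rq_depth,
			    unsigned int *sq_depth,
			    unsigned int *rdma_ctxs)
{
	*rq_depth  = max_requests + max_bc_requests;
	*rdma_ctxs = mr_factor * max_requests;
	*sq_depth  = *rq_depth + *rdma_ctxs;
}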
@@ -853,6 +866,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
853 dprintk(" remote address : %pIS:%u\n", sap, rpc_get_port(sap)); 866 dprintk(" remote address : %pIS:%u\n", sap, rpc_get_port(sap));
854 dprintk(" max_sge : %d\n", newxprt->sc_max_sge); 867 dprintk(" max_sge : %d\n", newxprt->sc_max_sge);
855 dprintk(" sq_depth : %d\n", newxprt->sc_sq_depth); 868 dprintk(" sq_depth : %d\n", newxprt->sc_sq_depth);
869 dprintk(" rdma_rw_ctxs : %d\n", ctxts);
856 dprintk(" max_requests : %d\n", newxprt->sc_max_requests); 870 dprintk(" max_requests : %d\n", newxprt->sc_max_requests);
857 dprintk(" ord : %d\n", newxprt->sc_ord); 871 dprintk(" ord : %d\n", newxprt->sc_ord);
858 872