author     Linus Torvalds <torvalds@linux-foundation.org>  2017-03-01 19:10:30 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-03-01 19:10:30 -0500
commit     8f03cf50bc9443e92d6e54ac4d599357d6cb7cbb (patch)
tree       0c38aab3a4c24d06fb05376b651157627bc1669d
parent     25c4e6c3f0c14d1575aa488ff4ca47e045ae51a0 (diff)
parent     ed92d8c137b7794c2c2aa14479298b9885967607 (diff)
Merge tag 'nfs-for-4.11-1' of git://git.linux-nfs.org/projects/anna/linux-nfs
Pull NFS client updates from Anna Schumaker:
"Highlights include:
Stable bugfixes:
- NFSv4: Fix memory and state leak in _nfs4_open_and_get_state
- xprtrdma: Fix Read chunk padding
- xprtrdma: Per-connection pad optimization
- xprtrdma: Disable pad optimization by default
- xprtrdma: Reduce required number of send SGEs
- nlm: Ensure callback code also checks that the files match
- pNFS/flexfiles: If the layout is invalid, it must be updated before
retrying
- NFSv4: Fix reboot recovery in copy offload
- Revert "NFSv4.1: Handle NFS4ERR_BADSESSION/NFS4ERR_DEADSESSION
replies to OP_SEQUENCE"
- NFSv4: fix getacl head length estimation
- NFSv4: fix getacl ERANGE for some ACL buffer sizes
Features:
- Add and use dprintk_cont macros
- Various cleanups to NFS v4.x to reduce code duplication and
complexity
- Remove unused cr_magic related code
- Improvements to sunrpc "read from buffer" code
- Clean up sunrpc timeout code and allow changing TCP timeout
parameters
- Remove duplicate mw_list management code in xprtrdma
- Add generic functions for encoding and decoding xdr streams
Bugfixes:
- Clean up nfs_show_mountd_netid
- Make layoutreturn_ops static and use NULL instead of 0 to fix
sparse warnings
- Properly handle -ERESTARTSYS in nfs_rename()
- Check if register_shrinker() failed during rpcauth_init()
- Properly clean up procfs/pipefs entries
- Various NFS over RDMA related fixes
- Silence uninitialized variable warning in sunrpc"
* tag 'nfs-for-4.11-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (64 commits)
NFSv4: fix getacl ERANGE for some ACL buffer sizes
NFSv4: fix getacl head length estimation
Revert "NFSv4.1: Handle NFS4ERR_BADSESSION/NFS4ERR_DEADSESSION replies to OP_SEQUENCE"
NFSv4: Fix reboot recovery in copy offload
pNFS/flexfiles: If the layout is invalid, it must be updated before retrying
NFSv4: Clean up owner/group attribute decode
SUNRPC: Add a helper function xdr_stream_decode_string_dup()
NFSv4: Remove bogus "struct nfs_client" argument from decode_ace()
NFSv4: Fix the underestimation of delegation XDR space reservation
NFSv4: Replace callback string decode function with a generic
NFSv4: Replace the open coded decode_opaque_inline() with the new generic
NFSv4: Replace ad-hoc xdr encode/decode helpers with xdr_stream_* generics
SUNRPC: Add generic helpers for xdr_stream encode/decode
sunrpc: silence uninitialized variable warning
nlm: Ensure callback code also checks that the files match
sunrpc: Allow xprt->ops->timer method to sleep
xprtrdma: Refactor management of mw_list field
xprtrdma: Handle stale connection rejection
xprtrdma: Properly recover FRWRs with in-flight FASTREG WRs
xprtrdma: Shrink send SGEs array
...
39 files changed, 902 insertions, 832 deletions
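The recurring pattern in this pull is the move from open-coded XDR buffer handling to the generic xdr_stream helpers added by the SUNRPC commits listed in the shortlog above. As a reading aid, here is a minimal sketch of the before/after shape of that conversion; it simply mirrors the decode_string()/encode_string() hunks in fs/nfs/callback_xdr.c below, so the helper names and the NFS4ERR_RESOURCE error mapping are taken from those hunks rather than from any separate API documentation.

    /*
     * Reading aid only: mirrors the decode_string()/encode_string() rewrite
     * in fs/nfs/callback_xdr.c below (kernel context, not standalone code).
     * Both helpers return a negative value on failure; the decode helper
     * returns the opaque length on success.
     */
    static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len,
                                const char **str, size_t maxlen)
    {
            ssize_t err;

            /* replaces the old read_buf() + ntohl() length/data parsing */
            err = xdr_stream_decode_opaque_inline(xdr, (void **)str, maxlen);
            if (err < 0)
                    return cpu_to_be32(NFS4ERR_RESOURCE);
            *len = err;
            return 0;
    }

    static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
    {
            /* replaces xdr_reserve_space() + xdr_encode_opaque() */
            if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0))
                    return cpu_to_be32(NFS4ERR_RESOURCE);
            return 0;
    }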
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
index 6de15709d024..2ae676f93e6b 100644
--- a/fs/nfs/cache_lib.c
+++ b/fs/nfs/cache_lib.c
@@ -141,8 +141,7 @@ int nfs_cache_register_net(struct net *net, struct cache_detail *cd)
 
 void nfs_cache_unregister_sb(struct super_block *sb, struct cache_detail *cd)
 {
-        if (cd->u.pipefs.dir)
-                sunrpc_cache_unregister_pipefs(cd);
+        sunrpc_cache_unregister_pipefs(cd);
 }
 
 void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd)
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index fd0284c1dc32..d051fc3583a9 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -83,23 +83,15 @@ static __be32 *read_buf(struct xdr_stream *xdr, size_t nbytes)
         return p;
 }
 
-static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str)
+static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len,
+                const char **str, size_t maxlen)
 {
-        __be32 *p;
-
-        p = read_buf(xdr, 4);
-        if (unlikely(p == NULL))
-                return htonl(NFS4ERR_RESOURCE);
-        *len = ntohl(*p);
-
-        if (*len != 0) {
-                p = read_buf(xdr, *len);
-                if (unlikely(p == NULL))
-                        return htonl(NFS4ERR_RESOURCE);
-                *str = (const char *)p;
-        } else
-                *str = NULL;
+        ssize_t err;
 
+        err = xdr_stream_decode_opaque_inline(xdr, (void **)str, maxlen);
+        if (err < 0)
+                return cpu_to_be32(NFS4ERR_RESOURCE);
+        *len = err;
         return 0;
 }
 
@@ -162,15 +154,9 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
         __be32 *p;
         __be32 status;
 
-        status = decode_string(xdr, &hdr->taglen, &hdr->tag);
+        status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ);
         if (unlikely(status != 0))
                 return status;
-        /* We do not like overly long tags! */
-        if (hdr->taglen > CB_OP_TAGLEN_MAXSZ) {
-                printk("NFS: NFSv4 CALLBACK %s: client sent tag of length %u\n",
-                                __func__, hdr->taglen);
-                return htonl(NFS4ERR_RESOURCE);
-        }
         p = read_buf(xdr, 12);
         if (unlikely(p == NULL))
                 return htonl(NFS4ERR_RESOURCE);
@@ -582,12 +568,8 @@ out:
 
 static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
 {
-        __be32 *p;
-
-        p = xdr_reserve_space(xdr, 4 + len);
-        if (unlikely(p == NULL))
-                return htonl(NFS4ERR_RESOURCE);
-        xdr_encode_opaque(p, str, len);
+        if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0))
+                return cpu_to_be32(NFS4ERR_RESOURCE);
         return 0;
 }
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fad81041f5ab..fb499a3f21b5 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2002,6 +2002,29 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 }
 EXPORT_SYMBOL_GPL(nfs_link);
 
+static void
+nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data)
+{
+        struct dentry *old_dentry = data->old_dentry;
+        struct dentry *new_dentry = data->new_dentry;
+        struct inode *old_inode = d_inode(old_dentry);
+        struct inode *new_inode = d_inode(new_dentry);
+
+        nfs_mark_for_revalidate(old_inode);
+
+        switch (task->tk_status) {
+        case 0:
+                if (new_inode != NULL)
+                        nfs_drop_nlink(new_inode);
+                d_move(old_dentry, new_dentry);
+                nfs_set_verifier(new_dentry,
+                                nfs_save_change_attribute(data->new_dir));
+                break;
+        case -ENOENT:
+                nfs_dentry_handle_enoent(old_dentry);
+        }
+}
+
 /*
  * RENAME
  * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -2084,7 +2107,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         if (new_inode != NULL)
                 NFS_PROTO(new_inode)->return_delegation(new_inode);
 
-        task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
+        task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+                                nfs_complete_rename);
         if (IS_ERR(task)) {
                 error = PTR_ERR(task);
                 goto out;
@@ -2094,21 +2118,11 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         if (error == 0)
                 error = task->tk_status;
         rpc_put_task(task);
-        nfs_mark_for_revalidate(old_inode);
 out:
         if (rehash)
                 d_rehash(rehash);
         trace_nfs_rename_exit(old_dir, old_dentry,
                         new_dir, new_dentry, error);
-        if (!error) {
-                if (new_inode != NULL)
-                        nfs_drop_nlink(new_inode);
-                d_move(old_dentry, new_dentry);
-                nfs_set_verifier(new_dentry,
-                                nfs_save_change_attribute(new_dir));
-        } else if (error == -ENOENT)
-                nfs_dentry_handle_enoent(old_dentry);
-
         /* new dentry created? */
         if (dentry)
                 dput(dentry);
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 18f98e08544d..44347f4bdc15 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -305,7 +305,7 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
         }
         hdr->pgio_done_cb = filelayout_read_done_cb;
 
-        if (nfs41_setup_sequence(hdr->ds_clp->cl_session,
+        if (nfs4_setup_sequence(hdr->ds_clp,
                         &hdr->args.seq_args,
                         &hdr->res.seq_res,
                         task))
@@ -403,7 +403,7 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
                 rpc_exit(task, 0);
                 return;
         }
-        if (nfs41_setup_sequence(hdr->ds_clp->cl_session,
+        if (nfs4_setup_sequence(hdr->ds_clp,
                         &hdr->args.seq_args,
                         &hdr->res.seq_res,
                         task))
@@ -438,7 +438,7 @@ static void filelayout_commit_prepare(struct rpc_task *task, void *data)
 {
         struct nfs_commit_data *wdata = data;
 
-        nfs41_setup_sequence(wdata->ds_clp->cl_session,
+        nfs4_setup_sequence(wdata->ds_clp,
                         &wdata->args.seq_args,
                         &wdata->res.seq_res,
                         task);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index d6acc688df7e..42dedf2d625f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1053,9 +1053,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
         struct nfs_client *mds_client = mds_server->nfs_client;
         struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
 
-        if (task->tk_status >= 0)
-                return 0;
-
         switch (task->tk_status) {
         /* MDS state errors */
         case -NFS4ERR_DELEG_REVOKED:
@@ -1157,9 +1154,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
 {
         struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 
-        if (task->tk_status >= 0)
-                return 0;
-
         switch (task->tk_status) {
         /* File access problems. Don't mark the device as unavailable */
         case -EACCES:
@@ -1195,6 +1189,13 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
 {
         int vers = clp->cl_nfs_mod->rpc_vers->number;
 
+        if (task->tk_status >= 0)
+                return 0;
+
+        /* Handle the case of an invalid layout segment */
+        if (!pnfs_is_valid_lseg(lseg))
+                return -NFS4ERR_RESET_TO_PNFS;
+
         switch (vers) {
         case 3:
                 return ff_layout_async_handle_error_v3(task, lseg, idx);
@@ -1384,30 +1385,14 @@ static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
         rpc_call_start(task);
 }
 
-static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
-                struct nfs4_sequence_args *args,
-                struct nfs4_sequence_res *res,
-                struct rpc_task *task)
-{
-        if (ds_clp->cl_session)
-                return nfs41_setup_sequence(ds_clp->cl_session,
-                                args,
-                                res,
-                                task);
-        return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
-                        args,
-                        res,
-                        task);
-}
-
 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
 {
         struct nfs_pgio_header *hdr = data;
 
-        if (ff_layout_setup_sequence(hdr->ds_clp,
+        if (nfs4_setup_sequence(hdr->ds_clp,
                         &hdr->args.seq_args,
                         &hdr->res.seq_res,
                         task))
                 return;
 
         if (ff_layout_read_prepare_common(task, hdr))
@@ -1578,10 +1563,10 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
 {
         struct nfs_pgio_header *hdr = data;
 
-        if (ff_layout_setup_sequence(hdr->ds_clp,
+        if (nfs4_setup_sequence(hdr->ds_clp,
                         &hdr->args.seq_args,
                         &hdr->res.seq_res,
                         task))
                 return;
 
         if (ff_layout_write_prepare_common(task, hdr))
@@ -1667,10 +1652,10 @@ static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
 {
         struct nfs_commit_data *wdata = data;
 
-        if (ff_layout_setup_sequence(wdata->ds_clp,
+        if (nfs4_setup_sequence(wdata->ds_clp,
                         &wdata->args.seq_args,
                         &wdata->res.seq_res,
                         task))
                 return;
         ff_layout_commit_prepare_common(task, data);
 }
@@ -1965,10 +1950,7 @@ static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
 static void
 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
 {
-        __be32 *p;
-
-        p = xdr_reserve_space(xdr, len);
-        xdr_encode_opaque_fixed(p, buf, len);
+        WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
 }
 
 static void
@@ -2092,7 +2074,7 @@ ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
         kfree(ff_args);
 }
 
-const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
+static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
         .encode = ff_layout_encode_layoutreturn,
         .free = ff_layout_free_layoutreturn,
 };
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index d12ff9385f49..1e486c73ec94 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -12,6 +12,7 @@
 #include "nfs42.h"
 #include "iostat.h"
 #include "pnfs.h"
+#include "nfs4session.h"
 #include "internal.h"
 
 #define NFSDBG_FACILITY NFSDBG_PROC
@@ -128,30 +129,26 @@ out_unlock:
         return err;
 }
 
-static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
+static ssize_t _nfs42_proc_copy(struct file *src,
                 struct nfs_lock_context *src_lock,
-                struct file *dst, loff_t pos_dst,
+                struct file *dst,
                 struct nfs_lock_context *dst_lock,
-                size_t count)
+                struct nfs42_copy_args *args,
+                struct nfs42_copy_res *res)
 {
-        struct nfs42_copy_args args = {
-                .src_fh = NFS_FH(file_inode(src)),
-                .src_pos = pos_src,
-                .dst_fh = NFS_FH(file_inode(dst)),
-                .dst_pos = pos_dst,
-                .count = count,
-        };
-        struct nfs42_copy_res res;
         struct rpc_message msg = {
                 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
-                .rpc_argp = &args,
-                .rpc_resp = &res,
+                .rpc_argp = args,
+                .rpc_resp = res,
         };
         struct inode *dst_inode = file_inode(dst);
         struct nfs_server *server = NFS_SERVER(dst_inode);
+        loff_t pos_src = args->src_pos;
+        loff_t pos_dst = args->dst_pos;
+        size_t count = args->count;
         int status;
 
-        status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+        status = nfs4_set_rw_stateid(&args->src_stateid, src_lock->open_context,
                         src_lock, FMODE_READ);
         if (status)
                 return status;
@@ -161,7 +158,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
         if (status)
                 return status;
 
-        status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+        status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
                         dst_lock, FMODE_WRITE);
         if (status)
                 return status;
@@ -171,22 +168,22 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
                 return status;
 
         status = nfs4_call_sync(server->client, server, &msg,
-                        &args.seq_args, &res.seq_res, 0);
+                        &args->seq_args, &res->seq_res, 0);
         if (status == -ENOTSUPP)
                 server->caps &= ~NFS_CAP_COPY;
         if (status)
                 return status;
 
-        if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
-                status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
+        if (res->write_res.verifier.committed != NFS_FILE_SYNC) {
+                status = nfs_commit_file(dst, &res->write_res.verifier.verifier);
                 if (status)
                         return status;
         }
 
         truncate_pagecache_range(dst_inode, pos_dst,
-                        pos_dst + res.write_res.count);
+                        pos_dst + res->write_res.count);
 
-        return res.write_res.count;
+        return res->write_res.count;
 }
 
 ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
@@ -196,8 +193,22 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
         struct nfs_server *server = NFS_SERVER(file_inode(dst));
         struct nfs_lock_context *src_lock;
         struct nfs_lock_context *dst_lock;
-        struct nfs4_exception src_exception = { };
-        struct nfs4_exception dst_exception = { };
+        struct nfs42_copy_args args = {
+                .src_fh = NFS_FH(file_inode(src)),
+                .src_pos = pos_src,
+                .dst_fh = NFS_FH(file_inode(dst)),
+                .dst_pos = pos_dst,
+                .count = count,
+        };
+        struct nfs42_copy_res res;
+        struct nfs4_exception src_exception = {
+                .inode = file_inode(src),
+                .stateid = &args.src_stateid,
+        };
+        struct nfs4_exception dst_exception = {
+                .inode = file_inode(dst),
+                .stateid = &args.dst_stateid,
+        };
         ssize_t err, err2;
 
         if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
@@ -207,7 +218,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
         if (IS_ERR(src_lock))
                 return PTR_ERR(src_lock);
 
-        src_exception.inode = file_inode(src);
         src_exception.state = src_lock->open_context->state;
 
         dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
@@ -216,15 +226,17 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
                 goto out_put_src_lock;
         }
 
-        dst_exception.inode = file_inode(dst);
         dst_exception.state = dst_lock->open_context->state;
 
         do {
                 inode_lock(file_inode(dst));
-                err = _nfs42_proc_copy(src, pos_src, src_lock,
-                                dst, pos_dst, dst_lock, count);
+                err = _nfs42_proc_copy(src, src_lock,
+                                dst, dst_lock,
+                                &args, &res);
                 inode_unlock(file_inode(dst));
 
+                if (err >= 0)
+                        break;
                 if (err == -ENOTSUPP) {
                         err = -EOPNOTSUPP;
                         break;
@@ -331,9 +343,8 @@ nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
         }
         nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
         spin_unlock(&inode->i_lock);
-        nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
+        nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
                         &data->res.seq_res, task);
-
 }
 
 static void
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 665165833660..af285cc27ccf 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -273,14 +273,6 @@ extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
                 fmode_t fmode);
 
 #if defined(CONFIG_NFS_V4_1)
-static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
-{
-        return server->nfs_client->cl_session;
-}
-
-extern int nfs41_setup_sequence(struct nfs4_session *session,
-                struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
-                struct rpc_task *task);
 extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
 extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
@@ -357,11 +349,6 @@ nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp,
         hdr->args.stable = NFS_FILE_SYNC;
 }
 #else /* CONFIG_NFS_v4_1 */
-static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
-{
-        return NULL;
-}
-
 static inline bool
 is_ds_only_client(struct nfs_client *clp)
 {
@@ -466,7 +453,7 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
 extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
 extern void nfs_release_seqid(struct nfs_seqid *seqid);
 extern void nfs_free_seqid(struct nfs_seqid *seqid);
-extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
+extern int nfs4_setup_sequence(const struct nfs_client *client,
                 struct nfs4_sequence_args *args,
                 struct nfs4_sequence_res *res,
                 struct rpc_task *task);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0a0eaecf9676..1b183686c6d4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -577,12 +577,7 @@ nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
 {
         rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
-
-        if (flavor == RPC_AUTH_GSS_KRB5I ||
-            flavor == RPC_AUTH_GSS_KRB5P)
-                return true;
-
-        return false;
+        return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
 }
 
 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
@@ -622,48 +617,6 @@ static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
         args->sa_privileged = 1;
 }
 
-int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
-                struct nfs4_sequence_args *args,
-                struct nfs4_sequence_res *res,
-                struct rpc_task *task)
-{
-        struct nfs4_slot *slot;
-
-        /* slot already allocated? */
-        if (res->sr_slot != NULL)
-                goto out_start;
-
-        spin_lock(&tbl->slot_tbl_lock);
-        if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
-                goto out_sleep;
-
-        slot = nfs4_alloc_slot(tbl);
-        if (IS_ERR(slot)) {
-                if (slot == ERR_PTR(-ENOMEM))
-                        task->tk_timeout = HZ >> 2;
-                goto out_sleep;
-        }
-        spin_unlock(&tbl->slot_tbl_lock);
-
-        slot->privileged = args->sa_privileged ? 1 : 0;
-        args->sa_slot = slot;
-        res->sr_slot = slot;
-
-out_start:
-        rpc_call_start(task);
-        return 0;
-
-out_sleep:
-        if (args->sa_privileged)
-                rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
-                                NULL, RPC_PRIORITY_PRIVILEGED);
-        else
-                rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
-        spin_unlock(&tbl->slot_tbl_lock);
-        return -EAGAIN;
-}
-EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
-
 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
 {
         struct nfs4_slot *slot = res->sr_slot;
@@ -815,10 +768,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
         case -NFS4ERR_SEQ_FALSE_RETRY:
                 ++slot->seq_nr;
                 goto retry_nowait;
-        case -NFS4ERR_DEADSESSION:
-        case -NFS4ERR_BADSESSION:
-                nfs4_schedule_session_recovery(session, res->sr_status);
-                goto retry_nowait;
         default:
                 /* Just update the slot sequence no. */
                 slot->seq_done = 1;
@@ -882,101 +831,14 @@ int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 }
 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
 
-int nfs41_setup_sequence(struct nfs4_session *session,
-                struct nfs4_sequence_args *args,
-                struct nfs4_sequence_res *res,
-                struct rpc_task *task)
-{
-        struct nfs4_slot *slot;
-        struct nfs4_slot_table *tbl;
-
-        dprintk("--> %s\n", __func__);
-        /* slot already allocated? */
-        if (res->sr_slot != NULL)
-                goto out_success;
-
-        tbl = &session->fc_slot_table;
-
-        task->tk_timeout = 0;
-
-        spin_lock(&tbl->slot_tbl_lock);
-        if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
-            !args->sa_privileged) {
-                /* The state manager will wait until the slot table is empty */
-                dprintk("%s session is draining\n", __func__);
-                goto out_sleep;
-        }
-
-        slot = nfs4_alloc_slot(tbl);
-        if (IS_ERR(slot)) {
-                /* If out of memory, try again in 1/4 second */
-                if (slot == ERR_PTR(-ENOMEM))
-                        task->tk_timeout = HZ >> 2;
-                dprintk("<-- %s: no free slots\n", __func__);
-                goto out_sleep;
-        }
-        spin_unlock(&tbl->slot_tbl_lock);
-
-        slot->privileged = args->sa_privileged ? 1 : 0;
-        args->sa_slot = slot;
-
-        dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
-                        slot->slot_nr, slot->seq_nr);
-
-        res->sr_slot = slot;
-        res->sr_timestamp = jiffies;
-        res->sr_status_flags = 0;
-        /*
-         * sr_status is only set in decode_sequence, and so will remain
-         * set to 1 if an rpc level failure occurs.
-         */
-        res->sr_status = 1;
-        trace_nfs4_setup_sequence(session, args);
-out_success:
-        rpc_call_start(task);
-        return 0;
-out_sleep:
-        /* Privileged tasks are queued with top priority */
-        if (args->sa_privileged)
-                rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
-                                NULL, RPC_PRIORITY_PRIVILEGED);
-        else
-                rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
-        spin_unlock(&tbl->slot_tbl_lock);
-        return -EAGAIN;
-}
-EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
-
-static int nfs4_setup_sequence(const struct nfs_server *server,
-                struct nfs4_sequence_args *args,
-                struct nfs4_sequence_res *res,
-                struct rpc_task *task)
-{
-        struct nfs4_session *session = nfs4_get_session(server);
-        int ret = 0;
-
-        if (!session)
-                return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
-                                args, res, task);
-
-        dprintk("--> %s clp %p session %p sr_slot %u\n",
-                        __func__, session->clp, session, res->sr_slot ?
-                        res->sr_slot->slot_nr : NFS4_NO_SLOT);
-
-        ret = nfs41_setup_sequence(session, args, res, task);
-
-        dprintk("<-- %s status=%d\n", __func__, ret);
-        return ret;
-}
-
 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
 {
         struct nfs4_call_sync_data *data = calldata;
-        struct nfs4_session *session = nfs4_get_session(data->seq_server);
 
         dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
 
-        nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
+        nfs4_setup_sequence(data->seq_server->nfs_client,
+                        data->seq_args, data->seq_res, task);
 }
 
 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
@@ -993,15 +855,6 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
 
 #else /* !CONFIG_NFS_V4_1 */
 
-static int nfs4_setup_sequence(const struct nfs_server *server,
-                struct nfs4_sequence_args *args,
-                struct nfs4_sequence_res *res,
-                struct rpc_task *task)
-{
-        return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
-                        args, res, task);
-}
-
 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
         return nfs40_sequence_done(task, res);
@@ -1022,10 +875,68 @@ EXPORT_SYMBOL_GPL(nfs4_sequence_done);
 
 #endif /* !CONFIG_NFS_V4_1 */
 
+int nfs4_setup_sequence(const struct nfs_client *client,
+                struct nfs4_sequence_args *args,
+                struct nfs4_sequence_res *res,
+                struct rpc_task *task)
+{
+        struct nfs4_session *session = nfs4_get_session(client);
+        struct nfs4_slot_table *tbl = client->cl_slot_tbl;
+        struct nfs4_slot *slot;
+
+        /* slot already allocated? */
+        if (res->sr_slot != NULL)
+                goto out_start;
+
+        if (session) {
+                tbl = &session->fc_slot_table;
+                task->tk_timeout = 0;
+        }
+
+        spin_lock(&tbl->slot_tbl_lock);
+        /* The state manager will wait until the slot table is empty */
+        if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
+                goto out_sleep;
+
+        slot = nfs4_alloc_slot(tbl);
+        if (IS_ERR(slot)) {
+                /* Try again in 1/4 second */
+                if (slot == ERR_PTR(-ENOMEM))
+                        task->tk_timeout = HZ >> 2;
+                goto out_sleep;
+        }
+        spin_unlock(&tbl->slot_tbl_lock);
+
+        slot->privileged = args->sa_privileged ? 1 : 0;
+        args->sa_slot = slot;
+
+        res->sr_slot = slot;
+        if (session) {
+                res->sr_timestamp = jiffies;
+                res->sr_status_flags = 0;
+                res->sr_status = 1;
+        }
+
+        trace_nfs4_setup_sequence(session, args);
+out_start:
+        rpc_call_start(task);
+        return 0;
+
+out_sleep:
+        if (args->sa_privileged)
+                rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
+                                NULL, RPC_PRIORITY_PRIVILEGED);
+        else
+                rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+        spin_unlock(&tbl->slot_tbl_lock);
+        return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
+
 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
 {
         struct nfs4_call_sync_data *data = calldata;
-        nfs4_setup_sequence(data->seq_server,
+        nfs4_setup_sequence(data->seq_server->nfs_client,
                         data->seq_args, data->seq_res, task);
 }
 
@@ -1330,14 +1241,6 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
         kref_put(&p->kref, nfs4_opendata_free);
 }
 
-static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
-{
-        int ret;
-
-        ret = rpc_wait_for_completion_task(task);
-        return ret;
-}
-
 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
                 fmode_t fmode)
 {
@@ -1732,17 +1635,15 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
         int ret;
 
         if (!data->rpc_done) {
-                if (data->rpc_status) {
-                        ret = data->rpc_status;
-                        goto err;
-                }
+                if (data->rpc_status)
+                        return ERR_PTR(data->rpc_status);
                 /* cached opens have already been processed */
                 goto update;
         }
 
         ret = nfs_refresh_inode(inode, &data->f_attr);
         if (ret)
-                goto err;
+                return ERR_PTR(ret);
 
         if (data->o_res.delegation_type != 0)
                 nfs4_opendata_check_deleg(data, state);
@@ -1752,9 +1653,6 @@ update:
         atomic_inc(&state->count);
 
         return state;
-err:
-        return ERR_PTR(ret);
-
 }
 
 static struct nfs4_state *
@@ -2048,8 +1946,8 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
 {
         struct nfs4_opendata *data = calldata;
 
-        nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
+        nfs4_setup_sequence(data->o_arg.server->nfs_client,
                         &data->c_arg.seq_args, &data->c_res.seq_res, task);
 }
 
 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
@@ -2124,7 +2022,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
         task = rpc_run_task(&task_setup_data);
         if (IS_ERR(task))
                 return PTR_ERR(task);
-        status = nfs4_wait_for_completion_rpc_task(task);
+        status = rpc_wait_for_completion_task(task);
         if (status != 0) {
                 data->cancelled = 1;
                 smp_wmb();
@@ -2172,7 +2070,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
         }
         data->timestamp = jiffies;
-        if (nfs4_setup_sequence(data->o_arg.server,
+        if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
                         &data->o_arg.seq_args,
                         &data->o_res.seq_res,
                         task) != 0)
@@ -2289,15 +2187,15 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
                 data->is_recover = 1;
         }
         task = rpc_run_task(&task_setup_data);
         if (IS_ERR(task))
                 return PTR_ERR(task);
-        status = nfs4_wait_for_completion_rpc_task(task);
+        status = rpc_wait_for_completion_task(task);
         if (status != 0) {
                 data->cancelled = 1;
                 smp_wmb();
         } else
                 status = data->rpc_status;
         rpc_put_task(task);
 
         return status;
 }
@@ -2306,7 +2204,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
 {
         struct inode *dir = d_inode(data->dir);
         struct nfs_openres *o_res = &data->o_res;
         int status;
 
         status = nfs4_run_open_task(data, 1);
         if (status != 0 || !data->rpc_done)
@@ -2314,11 +2212,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
 
         nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
 
-        if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
+        if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
                 status = _nfs4_proc_open_confirm(data);
-                if (status != 0)
-                        return status;
-        }
 
         return status;
 }
@@ -2412,11 +2307,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
         return 0;
 }
 
-static int nfs4_recover_expired_lease(struct nfs_server *server)
-{
-        return nfs4_client_recover_expired_lease(server->nfs_client);
-}
-
 /*
  * OPEN_EXPIRED:
  *      reclaim state on the server after a network partition.
@@ -2730,6 +2620,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
         ret = PTR_ERR(state);
         if (IS_ERR(state))
                 goto out;
+        ctx->state = state;
         if (server->caps & NFS_CAP_POSIX_LOCK)
                 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
         if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
@@ -2755,7 +2646,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
         if (ret != 0)
                 goto out;
 
-        ctx->state = state;
         if (d_inode(dentry) == state->inode) {
                 nfs_inode_attach_open_context(ctx);
                 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -2794,7 +2684,7 @@ static int _nfs4_do_open(struct inode *dir,
                 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
                 goto out_err;
         }
-        status = nfs4_recover_expired_lease(server);
+        status = nfs4_client_recover_expired_lease(server->nfs_client);
         if (status != 0)
                 goto err_put_state_owner;
         if (d_really_is_positive(dentry))
@@ -2940,12 +2830,12 @@ static int _nfs4_do_setattr(struct inode *inode,
                 struct nfs_open_context *ctx)
 {
         struct nfs_server *server = NFS_SERVER(inode);
         struct rpc_message msg = {
                 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
                 .rpc_argp = arg,
                 .rpc_resp = res,
                 .rpc_cred = cred,
         };
         struct rpc_cred *delegation_cred = NULL;
         unsigned long timestamp = jiffies;
         fmode_t fmode;
@@ -2993,18 +2883,18 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 {
         struct nfs_server *server = NFS_SERVER(inode);
         struct nfs4_state *state = ctx ? ctx->state : NULL;
         struct nfs_setattrargs arg = {
                 .fh = NFS_FH(inode),
                 .iap = sattr,
                 .server = server,
                 .bitmask = server->attr_bitmask,
                 .label = ilabel,
         };
         struct nfs_setattrres res = {
                 .fattr = fattr,
                 .label = olabel,
                 .server = server,
         };
         struct nfs4_exception exception = {
                 .state = state,
                 .inode = inode,
@@ -3118,7 +3008,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
                 }
         }
 
         /* hmm. we are done with the inode, and in the process of freeing
          * the state_owner. we keep this around to process errors
          */
         switch (task->tk_status) {
@@ -3234,7 +3124,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
         else if (calldata->arg.bitmask == NULL)
                 calldata->res.fattr = NULL;
         calldata->timestamp = jiffies;
-        if (nfs4_setup_sequence(NFS_SERVER(inode),
+        if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
                         &calldata->arg.seq_args,
                         &calldata->res.seq_res,
                         task) != 0)
@@ -3522,16 +3412,11 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
                 .pseudoflavor = flavor,
         };
         struct rpc_auth *auth;
-        int ret;
 
         auth = rpcauth_create(&auth_args, server->client);
-        if (IS_ERR(auth)) {
-                ret = -EACCES;
-                goto out;
-        }
-        ret = nfs4_lookup_root(server, fhandle, info);
-out:
-        return ret;
+        if (IS_ERR(auth))
+                return -EACCES;
+        return nfs4_lookup_root(server, fhandle, info);
 }
 
 /*
@@ -4114,7 +3999,7 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
 
 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
 {
-        nfs4_setup_sequence(NFS_SB(data->dentry->d_sb),
+        nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
                         &data->args.seq_args,
                         &data->res.seq_res,
                         task);
@@ -4148,7 +4033,7 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
 
 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
 {
-        nfs4_setup_sequence(NFS_SERVER(data->old_dir),
+        nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
                         &data->args.seq_args,
                         &data->res.seq_res,
                         task);
@@ -4723,7 +4608,7 @@ static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
                 struct nfs_pgio_header *hdr)
 {
-        if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
+        if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
                         &hdr->args.seq_args,
                         &hdr->res.seq_res,
                         task))
@@ -4822,7 +4707,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
 
 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
 {
-        nfs4_setup_sequence(NFS_SERVER(data->inode),
+        nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
                         &data->args.seq_args,
                         &data->res.seq_res,
                         task);
@@ -4975,8 +4860,8 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen,
                 if (newpage == NULL)
                         goto unwind;
                 memcpy(page_address(newpage), buf, len);
                 buf += len;
                 buflen -= len;
                 *pages++ = newpage;
                 rc++;
         } while (buflen != 0);
@@ -5069,7 +4954,7 @@ out:
  */
 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
-        struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+        struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
         struct nfs_getaclargs args = {
                 .fh = NFS_FH(inode),
                 .acl_pages = pages,
@@ -5083,13 +4968,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                 .rpc_argp = &args,
                 .rpc_resp = &res,
         };
-        unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+        unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
         int ret = -ENOMEM, i;
 
-        /* As long as we're doing a round trip to the server anyway,
-         * let's be prepared for a page of acl data. */
-        if (npages == 0)
-                npages = 1;
         if (npages > ARRAY_SIZE(pages))
                 return -ERANGE;
 
@@ -5299,8 +5180,8 @@ static int _nfs4_do_set_security_label(struct inode *inode,
         struct nfs_server *server = NFS_SERVER(inode);
         const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
         struct nfs_setattrargs arg = {
                 .fh = NFS_FH(inode),
                 .iap = &sattr,
                 .server = server,
                 .bitmask = bitmask,
                 .label = ilabel,
@@ -5311,9 +5192,9 @@ static int _nfs4_do_set_security_label(struct inode *inode,
                 .server = server,
         };
         struct rpc_message msg = {
                 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
                 .rpc_argp = &arg,
                 .rpc_resp = &res,
         };
         int status;
 
@@ -5747,7 +5628,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) | |||
5747 | if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) | 5628 | if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) |
5748 | return; | 5629 | return; |
5749 | 5630 | ||
5750 | nfs4_setup_sequence(d_data->res.server, | 5631 | nfs4_setup_sequence(d_data->res.server->nfs_client, |
5751 | &d_data->args.seq_args, | 5632 | &d_data->args.seq_args, |
5752 | &d_data->res.seq_res, | 5633 | &d_data->res.seq_res, |
5753 | task); | 5634 | task); |
@@ -5817,7 +5698,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co | |||
5817 | return PTR_ERR(task); | 5698 | return PTR_ERR(task); |
5818 | if (!issync) | 5699 | if (!issync) |
5819 | goto out; | 5700 | goto out; |
5820 | status = nfs4_wait_for_completion_rpc_task(task); | 5701 | status = rpc_wait_for_completion_task(task); |
5821 | if (status != 0) | 5702 | if (status != 0) |
5822 | goto out; | 5703 | goto out; |
5823 | status = data->rpc_status; | 5704 | status = data->rpc_status; |
@@ -5859,8 +5740,8 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock | |||
5859 | }; | 5740 | }; |
5860 | struct rpc_message msg = { | 5741 | struct rpc_message msg = { |
5861 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], | 5742 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], |
5862 | .rpc_argp = &arg, | 5743 | .rpc_argp = &arg, |
5863 | .rpc_resp = &res, | 5744 | .rpc_resp = &res, |
5864 | .rpc_cred = state->owner->so_cred, | 5745 | .rpc_cred = state->owner->so_cred, |
5865 | }; | 5746 | }; |
5866 | struct nfs4_lock_state *lsp; | 5747 | struct nfs4_lock_state *lsp; |
@@ -5989,7 +5870,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) | |||
5989 | goto out_no_action; | 5870 | goto out_no_action; |
5990 | } | 5871 | } |
5991 | calldata->timestamp = jiffies; | 5872 | calldata->timestamp = jiffies; |
5992 | if (nfs4_setup_sequence(calldata->server, | 5873 | if (nfs4_setup_sequence(calldata->server->nfs_client, |
5993 | &calldata->arg.seq_args, | 5874 | &calldata->arg.seq_args, |
5994 | &calldata->res.seq_res, | 5875 | &calldata->res.seq_res, |
5995 | task) != 0) | 5876 | task) != 0) |
@@ -6087,7 +5968,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * | |||
6087 | status = PTR_ERR(task); | 5968 | status = PTR_ERR(task); |
6088 | if (IS_ERR(task)) | 5969 | if (IS_ERR(task)) |
6089 | goto out; | 5970 | goto out; |
6090 | status = nfs4_wait_for_completion_rpc_task(task); | 5971 | status = rpc_wait_for_completion_task(task); |
6091 | rpc_put_task(task); | 5972 | rpc_put_task(task); |
6092 | out: | 5973 | out: |
6093 | request->fl_flags = fl_flags; | 5974 | request->fl_flags = fl_flags; |
@@ -6174,7 +6055,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) | |||
6174 | goto out_release_open_seqid; | 6055 | goto out_release_open_seqid; |
6175 | } | 6056 | } |
6176 | data->timestamp = jiffies; | 6057 | data->timestamp = jiffies; |
6177 | if (nfs4_setup_sequence(data->server, | 6058 | if (nfs4_setup_sequence(data->server->nfs_client, |
6178 | &data->arg.seq_args, | 6059 | &data->arg.seq_args, |
6179 | &data->res.seq_res, | 6060 | &data->res.seq_res, |
6180 | task) == 0) | 6061 | task) == 0) |
@@ -6314,7 +6195,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f | |||
6314 | task = rpc_run_task(&task_setup_data); | 6195 | task = rpc_run_task(&task_setup_data); |
6315 | if (IS_ERR(task)) | 6196 | if (IS_ERR(task)) |
6316 | return PTR_ERR(task); | 6197 | return PTR_ERR(task); |
6317 | ret = nfs4_wait_for_completion_rpc_task(task); | 6198 | ret = rpc_wait_for_completion_task(task); |
6318 | if (ret == 0) { | 6199 | if (ret == 0) { |
6319 | ret = data->rpc_status; | 6200 | ret = data->rpc_status; |
6320 | if (ret) | 6201 | if (ret) |
@@ -6393,8 +6274,7 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques | |||
6393 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || | 6274 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || |
6394 | test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) | 6275 | test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) |
6395 | return 0; | 6276 | return 0; |
6396 | status = nfs4_lock_expired(state, request); | 6277 | return nfs4_lock_expired(state, request); |
6397 | return status; | ||
6398 | } | 6278 | } |
6399 | #endif | 6279 | #endif |
6400 | 6280 | ||
@@ -6640,8 +6520,8 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata | |||
6640 | { | 6520 | { |
6641 | struct nfs_release_lockowner_data *data = calldata; | 6521 | struct nfs_release_lockowner_data *data = calldata; |
6642 | struct nfs_server *server = data->server; | 6522 | struct nfs_server *server = data->server; |
6643 | nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, | 6523 | nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, |
6644 | &data->args.seq_args, &data->res.seq_res, task); | 6524 | &data->res.seq_res, task); |
6645 | data->args.lock_owner.clientid = server->nfs_client->cl_clientid; | 6525 | data->args.lock_owner.clientid = server->nfs_client->cl_clientid; |
6646 | data->timestamp = jiffies; | 6526 | data->timestamp = jiffies; |
6647 | } | 6527 | } |
@@ -7232,11 +7112,9 @@ static bool | |||
7232 | nfs41_same_server_scope(struct nfs41_server_scope *a, | 7112 | nfs41_same_server_scope(struct nfs41_server_scope *a, |
7233 | struct nfs41_server_scope *b) | 7113 | struct nfs41_server_scope *b) |
7234 | { | 7114 | { |
7235 | if (a->server_scope_sz == b->server_scope_sz && | 7115 | if (a->server_scope_sz != b->server_scope_sz) |
7236 | memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) | 7116 | return false; |
7237 | return true; | 7117 | return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; |
7238 | |||
7239 | return false; | ||
7240 | } | 7118 | } |
7241 | 7119 | ||
7242 | static void | 7120 | static void |
@@ -7831,7 +7709,7 @@ static void nfs4_get_lease_time_prepare(struct rpc_task *task, | |||
7831 | dprintk("--> %s\n", __func__); | 7709 | dprintk("--> %s\n", __func__); |
7832 | /* just setup sequence, do not trigger session recovery | 7710 | /* just setup sequence, do not trigger session recovery |
7833 | since we're invoked within one */ | 7711 | since we're invoked within one */ |
7834 | nfs41_setup_sequence(data->clp->cl_session, | 7712 | nfs4_setup_sequence(data->clp, |
7835 | &data->args->la_seq_args, | 7713 | &data->args->la_seq_args, |
7836 | &data->res->lr_seq_res, | 7714 | &data->res->lr_seq_res, |
7837 | task); | 7715 | task); |
@@ -8202,7 +8080,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) | |||
8202 | args = task->tk_msg.rpc_argp; | 8080 | args = task->tk_msg.rpc_argp; |
8203 | res = task->tk_msg.rpc_resp; | 8081 | res = task->tk_msg.rpc_resp; |
8204 | 8082 | ||
8205 | nfs41_setup_sequence(clp->cl_session, args, res, task); | 8083 | nfs4_setup_sequence(clp, args, res, task); |
8206 | } | 8084 | } |
8207 | 8085 | ||
8208 | static const struct rpc_call_ops nfs41_sequence_ops = { | 8086 | static const struct rpc_call_ops nfs41_sequence_ops = { |
@@ -8290,7 +8168,7 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) | |||
8290 | { | 8168 | { |
8291 | struct nfs4_reclaim_complete_data *calldata = data; | 8169 | struct nfs4_reclaim_complete_data *calldata = data; |
8292 | 8170 | ||
8293 | nfs41_setup_sequence(calldata->clp->cl_session, | 8171 | nfs4_setup_sequence(calldata->clp, |
8294 | &calldata->arg.seq_args, | 8172 | &calldata->arg.seq_args, |
8295 | &calldata->res.seq_res, | 8173 | &calldata->res.seq_res, |
8296 | task); | 8174 | task); |
@@ -8382,7 +8260,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp, | |||
8382 | status = PTR_ERR(task); | 8260 | status = PTR_ERR(task); |
8383 | goto out; | 8261 | goto out; |
8384 | } | 8262 | } |
8385 | status = nfs4_wait_for_completion_rpc_task(task); | 8263 | status = rpc_wait_for_completion_task(task); |
8386 | if (status == 0) | 8264 | if (status == 0) |
8387 | status = task->tk_status; | 8265 | status = task->tk_status; |
8388 | rpc_put_task(task); | 8266 | rpc_put_task(task); |
@@ -8397,10 +8275,9 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) | |||
8397 | { | 8275 | { |
8398 | struct nfs4_layoutget *lgp = calldata; | 8276 | struct nfs4_layoutget *lgp = calldata; |
8399 | struct nfs_server *server = NFS_SERVER(lgp->args.inode); | 8277 | struct nfs_server *server = NFS_SERVER(lgp->args.inode); |
8400 | struct nfs4_session *session = nfs4_get_session(server); | ||
8401 | 8278 | ||
8402 | dprintk("--> %s\n", __func__); | 8279 | dprintk("--> %s\n", __func__); |
8403 | nfs41_setup_sequence(session, &lgp->args.seq_args, | 8280 | nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, |
8404 | &lgp->res.seq_res, task); | 8281 | &lgp->res.seq_res, task); |
8405 | dprintk("<-- %s\n", __func__); | 8282 | dprintk("<-- %s\n", __func__); |
8406 | } | 8283 | } |
@@ -8615,7 +8492,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) | |||
8615 | task = rpc_run_task(&task_setup_data); | 8492 | task = rpc_run_task(&task_setup_data); |
8616 | if (IS_ERR(task)) | 8493 | if (IS_ERR(task)) |
8617 | return ERR_CAST(task); | 8494 | return ERR_CAST(task); |
8618 | status = nfs4_wait_for_completion_rpc_task(task); | 8495 | status = rpc_wait_for_completion_task(task); |
8619 | if (status == 0) { | 8496 | if (status == 0) { |
8620 | status = nfs4_layoutget_handle_exception(task, lgp, &exception); | 8497 | status = nfs4_layoutget_handle_exception(task, lgp, &exception); |
8621 | *timeout = exception.timeout; | 8498 | *timeout = exception.timeout; |
@@ -8644,7 +8521,7 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) | |||
8644 | struct nfs4_layoutreturn *lrp = calldata; | 8521 | struct nfs4_layoutreturn *lrp = calldata; |
8645 | 8522 | ||
8646 | dprintk("--> %s\n", __func__); | 8523 | dprintk("--> %s\n", __func__); |
8647 | nfs41_setup_sequence(lrp->clp->cl_session, | 8524 | nfs4_setup_sequence(lrp->clp, |
8648 | &lrp->args.seq_args, | 8525 | &lrp->args.seq_args, |
8649 | &lrp->res.seq_res, | 8526 | &lrp->res.seq_res, |
8650 | task); | 8527 | task); |
@@ -8794,9 +8671,8 @@ static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) | |||
8794 | { | 8671 | { |
8795 | struct nfs4_layoutcommit_data *data = calldata; | 8672 | struct nfs4_layoutcommit_data *data = calldata; |
8796 | struct nfs_server *server = NFS_SERVER(data->args.inode); | 8673 | struct nfs_server *server = NFS_SERVER(data->args.inode); |
8797 | struct nfs4_session *session = nfs4_get_session(server); | ||
8798 | 8674 | ||
8799 | nfs41_setup_sequence(session, | 8675 | nfs4_setup_sequence(server->nfs_client, |
8800 | &data->args.seq_args, | 8676 | &data->args.seq_args, |
8801 | &data->res.seq_res, | 8677 | &data->res.seq_res, |
8802 | task); | 8678 | task); |
@@ -9120,7 +8996,7 @@ struct nfs_free_stateid_data { | |||
9120 | static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) | 8996 | static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) |
9121 | { | 8997 | { |
9122 | struct nfs_free_stateid_data *data = calldata; | 8998 | struct nfs_free_stateid_data *data = calldata; |
9123 | nfs41_setup_sequence(nfs4_get_session(data->server), | 8999 | nfs4_setup_sequence(data->server->nfs_client, |
9124 | &data->args.seq_args, | 9000 | &data->args.seq_args, |
9125 | &data->res.seq_res, | 9001 | &data->res.seq_res, |
9126 | task); | 9002 | task); |
@@ -9232,10 +9108,8 @@ static bool nfs41_match_stateid(const nfs4_stateid *s1, | |||
9232 | 9108 | ||
9233 | if (s1->seqid == s2->seqid) | 9109 | if (s1->seqid == s2->seqid) |
9234 | return true; | 9110 | return true; |
9235 | if (s1->seqid == 0 || s2->seqid == 0) | ||
9236 | return true; | ||
9237 | 9111 | ||
9238 | return false; | 9112 | return s1->seqid == 0 || s2->seqid == 0; |
9239 | } | 9113 | } |
9240 | 9114 | ||
9241 | #endif /* CONFIG_NFS_V4_1 */ | 9115 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 82e77198d17e..1f8c2ae43a8d 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c | |||
@@ -153,7 +153,7 @@ void nfs4_set_lease_period(struct nfs_client *clp, | |||
153 | spin_unlock(&clp->cl_lock); | 153 | spin_unlock(&clp->cl_lock); |
154 | 154 | ||
155 | /* Cap maximum reconnect timeout at 1/2 lease period */ | 155 | /* Cap maximum reconnect timeout at 1/2 lease period */ |
156 | rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1); | 156 | rpc_set_connect_timeout(clp->cl_rpcclient, lease, lease >> 1); |
157 | } | 157 | } |
158 | 158 | ||
159 | /* | 159 | /* |
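With the renamed helper, the renew daemon derives two values from the lease instead of one. A hedged sketch with concrete numbers (the surrounding lease handling appears to pass jiffies, so a 90 second lease is 90 * HZ; the wrapper name is illustrative):

    static void example_set_lease_timeouts(struct nfs_client *clp,
                                           unsigned long lease)
    {
            /* lease = 90 * HZ: connect attempts give up after ~90s and
             * reconnect backoff is capped at ~45s, i.e. half the lease,
             * matching the old rpc_cap_max_reconnect_timeout() behaviour. */
            rpc_set_connect_timeout(clp->cl_rpcclient, lease, lease >> 1);
    }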
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index dae385500005..dfae4880eacb 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h | |||
@@ -103,6 +103,11 @@ static inline bool nfs4_test_locked_slot(const struct nfs4_slot_table *tbl, | |||
103 | return !!test_bit(slotid, tbl->used_slots); | 103 | return !!test_bit(slotid, tbl->used_slots); |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline struct nfs4_session *nfs4_get_session(const struct nfs_client *clp) | ||
107 | { | ||
108 | return clp->cl_session; | ||
109 | } | ||
110 | |||
106 | #if defined(CONFIG_NFS_V4_1) | 111 | #if defined(CONFIG_NFS_V4_1) |
107 | extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, | 112 | extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, |
108 | u32 target_highest_slotid); | 113 | u32 target_highest_slotid); |
@@ -170,6 +175,8 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp) | |||
170 | return 0; | 175 | return 0; |
171 | } | 176 | } |
172 | 177 | ||
178 | #define nfs_session_id_hash(session) (0) | ||
179 | |||
173 | #endif /* defined(CONFIG_NFS_V4_1) */ | 180 | #endif /* defined(CONFIG_NFS_V4_1) */ |
174 | #endif /* IS_ENABLED(CONFIG_NFS_V4) */ | 181 | #endif /* IS_ENABLED(CONFIG_NFS_V4) */ |
175 | #endif /* __LINUX_FS_NFS_NFS4SESSION_H */ | 182 | #endif /* __LINUX_FS_NFS_NFS4SESSION_H */ |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index daeb94e3acd4..8156bad6b441 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -868,7 +868,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ | |||
868 | 868 | ||
869 | for(;;) { | 869 | for(;;) { |
870 | spin_lock(&state->state_lock); | 870 | spin_lock(&state->state_lock); |
871 | lsp = __nfs4_find_lock_state(state, owner, 0); | 871 | lsp = __nfs4_find_lock_state(state, owner, NULL); |
872 | if (lsp != NULL) | 872 | if (lsp != NULL) |
873 | break; | 873 | break; |
874 | if (new != NULL) { | 874 | if (new != NULL) { |
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index cfb8f7ce5cf6..845d0eadefc9 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h | |||
@@ -241,38 +241,6 @@ DEFINE_NFS4_CLIENTID_EVENT(nfs4_bind_conn_to_session); | |||
241 | DEFINE_NFS4_CLIENTID_EVENT(nfs4_sequence); | 241 | DEFINE_NFS4_CLIENTID_EVENT(nfs4_sequence); |
242 | DEFINE_NFS4_CLIENTID_EVENT(nfs4_reclaim_complete); | 242 | DEFINE_NFS4_CLIENTID_EVENT(nfs4_reclaim_complete); |
243 | 243 | ||
244 | TRACE_EVENT(nfs4_setup_sequence, | ||
245 | TP_PROTO( | ||
246 | const struct nfs4_session *session, | ||
247 | const struct nfs4_sequence_args *args | ||
248 | ), | ||
249 | TP_ARGS(session, args), | ||
250 | |||
251 | TP_STRUCT__entry( | ||
252 | __field(unsigned int, session) | ||
253 | __field(unsigned int, slot_nr) | ||
254 | __field(unsigned int, seq_nr) | ||
255 | __field(unsigned int, highest_used_slotid) | ||
256 | ), | ||
257 | |||
258 | TP_fast_assign( | ||
259 | const struct nfs4_slot *sa_slot = args->sa_slot; | ||
260 | __entry->session = nfs_session_id_hash(&session->sess_id); | ||
261 | __entry->slot_nr = sa_slot->slot_nr; | ||
262 | __entry->seq_nr = sa_slot->seq_nr; | ||
263 | __entry->highest_used_slotid = | ||
264 | sa_slot->table->highest_used_slotid; | ||
265 | ), | ||
266 | TP_printk( | ||
267 | "session=0x%08x slot_nr=%u seq_nr=%u " | ||
268 | "highest_used_slotid=%u", | ||
269 | __entry->session, | ||
270 | __entry->slot_nr, | ||
271 | __entry->seq_nr, | ||
272 | __entry->highest_used_slotid | ||
273 | ) | ||
274 | ); | ||
275 | |||
276 | #define show_nfs4_sequence_status_flags(status) \ | 244 | #define show_nfs4_sequence_status_flags(status) \ |
277 | __print_flags((unsigned long)status, "|", \ | 245 | __print_flags((unsigned long)status, "|", \ |
278 | { SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \ | 246 | { SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \ |
@@ -382,6 +350,38 @@ TRACE_EVENT(nfs4_cb_sequence, | |||
382 | ); | 350 | ); |
383 | #endif /* CONFIG_NFS_V4_1 */ | 351 | #endif /* CONFIG_NFS_V4_1 */ |
384 | 352 | ||
353 | TRACE_EVENT(nfs4_setup_sequence, | ||
354 | TP_PROTO( | ||
355 | const struct nfs4_session *session, | ||
356 | const struct nfs4_sequence_args *args | ||
357 | ), | ||
358 | TP_ARGS(session, args), | ||
359 | |||
360 | TP_STRUCT__entry( | ||
361 | __field(unsigned int, session) | ||
362 | __field(unsigned int, slot_nr) | ||
363 | __field(unsigned int, seq_nr) | ||
364 | __field(unsigned int, highest_used_slotid) | ||
365 | ), | ||
366 | |||
367 | TP_fast_assign( | ||
368 | const struct nfs4_slot *sa_slot = args->sa_slot; | ||
369 | __entry->session = session ? nfs_session_id_hash(&session->sess_id) : 0; | ||
370 | __entry->slot_nr = sa_slot->slot_nr; | ||
371 | __entry->seq_nr = sa_slot->seq_nr; | ||
372 | __entry->highest_used_slotid = | ||
373 | sa_slot->table->highest_used_slotid; | ||
374 | ), | ||
375 | TP_printk( | ||
376 | "session=0x%08x slot_nr=%u seq_nr=%u " | ||
377 | "highest_used_slotid=%u", | ||
378 | __entry->session, | ||
379 | __entry->slot_nr, | ||
380 | __entry->seq_nr, | ||
381 | __entry->highest_used_slotid | ||
382 | ) | ||
383 | ); | ||
384 | |||
385 | DECLARE_EVENT_CLASS(nfs4_open_event, | 385 | DECLARE_EVENT_CLASS(nfs4_open_event, |
386 | TP_PROTO( | 386 | TP_PROTO( |
387 | const struct nfs_open_context *ctx, | 387 | const struct nfs_open_context *ctx, |
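Relocating the nfs4_setup_sequence trace event below the CONFIG_NFS_V4_1-only block lets the unified setup path, which now also serves NFSv4.0, fire it unconditionally; the only functional change is the NULL-session guard. A minimal sketch of that guard (illustrative function name), relying on the nfs_session_id_hash() stub added to nfs4session.h earlier in this diff for builds without v4.1:

    static unsigned int example_trace_session_hash(const struct nfs4_session *session)
    {
            /* NFSv4.0 mounts have no session, so record 0 rather than
             * dereferencing a NULL pointer; with CONFIG_NFS_V4_1=n the
             * hash macro is the (0) stub, so this still compiles. */
            return session ? nfs_session_id_hash(&session->sess_id) : 0;
    }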
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e9255cb453e6..f0369e362753 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -169,8 +169,10 @@ static int nfs4_stat_to_errno(int); | |||
169 | open_owner_id_maxsz + \ | 169 | open_owner_id_maxsz + \ |
170 | encode_opentype_maxsz + \ | 170 | encode_opentype_maxsz + \ |
171 | encode_claim_null_maxsz) | 171 | encode_claim_null_maxsz) |
172 | #define decode_space_limit_maxsz (3) | ||
172 | #define decode_ace_maxsz (3 + nfs4_owner_maxsz) | 173 | #define decode_ace_maxsz (3 + nfs4_owner_maxsz) |
173 | #define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \ | 174 | #define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \ |
175 | decode_space_limit_maxsz + \ | ||
174 | decode_ace_maxsz) | 176 | decode_ace_maxsz) |
175 | #define decode_change_info_maxsz (5) | 177 | #define decode_change_info_maxsz (5) |
176 | #define decode_open_maxsz (op_decode_hdr_maxsz + \ | 178 | #define decode_open_maxsz (op_decode_hdr_maxsz + \ |
@@ -924,34 +926,22 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes) | |||
924 | 926 | ||
925 | static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) | 927 | static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) |
926 | { | 928 | { |
927 | __be32 *p; | 929 | WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0); |
928 | |||
929 | p = xdr_reserve_space(xdr, len); | ||
930 | xdr_encode_opaque_fixed(p, buf, len); | ||
931 | } | 930 | } |
932 | 931 | ||
933 | static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) | 932 | static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) |
934 | { | 933 | { |
935 | __be32 *p; | 934 | WARN_ON_ONCE(xdr_stream_encode_opaque(xdr, str, len) < 0); |
936 | |||
937 | p = reserve_space(xdr, 4 + len); | ||
938 | xdr_encode_opaque(p, str, len); | ||
939 | } | 935 | } |
940 | 936 | ||
941 | static void encode_uint32(struct xdr_stream *xdr, u32 n) | 937 | static void encode_uint32(struct xdr_stream *xdr, u32 n) |
942 | { | 938 | { |
943 | __be32 *p; | 939 | WARN_ON_ONCE(xdr_stream_encode_u32(xdr, n) < 0); |
944 | |||
945 | p = reserve_space(xdr, 4); | ||
946 | *p = cpu_to_be32(n); | ||
947 | } | 940 | } |
948 | 941 | ||
949 | static void encode_uint64(struct xdr_stream *xdr, u64 n) | 942 | static void encode_uint64(struct xdr_stream *xdr, u64 n) |
950 | { | 943 | { |
951 | __be32 *p; | 944 | WARN_ON_ONCE(xdr_stream_encode_u64(xdr, n) < 0); |
952 | |||
953 | p = reserve_space(xdr, 8); | ||
954 | xdr_encode_hyper(p, n); | ||
955 | } | 945 | } |
956 | 946 | ||
957 | static void encode_nfs4_seqid(struct xdr_stream *xdr, | 947 | static void encode_nfs4_seqid(struct xdr_stream *xdr, |
@@ -2524,7 +2514,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
2524 | encode_compound_hdr(xdr, req, &hdr); | 2514 | encode_compound_hdr(xdr, req, &hdr); |
2525 | encode_sequence(xdr, &args->seq_args, &hdr); | 2515 | encode_sequence(xdr, &args->seq_args, &hdr); |
2526 | encode_putfh(xdr, args->fh, &hdr); | 2516 | encode_putfh(xdr, args->fh, &hdr); |
2527 | replen = hdr.replen + op_decode_hdr_maxsz + 1; | 2517 | replen = hdr.replen + op_decode_hdr_maxsz; |
2528 | encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); | 2518 | encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); |
2529 | 2519 | ||
2530 | xdr_inline_pages(&req->rq_rcv_buf, replen << 2, | 2520 | xdr_inline_pages(&req->rq_rcv_buf, replen << 2, |
@@ -3062,20 +3052,15 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) | |||
3062 | 3052 | ||
3063 | static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string) | 3053 | static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string) |
3064 | { | 3054 | { |
3065 | __be32 *p; | 3055 | ssize_t ret = xdr_stream_decode_opaque_inline(xdr, (void **)string, |
3066 | 3056 | NFS4_OPAQUE_LIMIT); | |
3067 | p = xdr_inline_decode(xdr, 4); | 3057 | if (unlikely(ret < 0)) { |
3068 | if (unlikely(!p)) | 3058 | if (ret == -EBADMSG) |
3069 | goto out_overflow; | 3059 | print_overflow_msg(__func__, xdr); |
3070 | *len = be32_to_cpup(p); | 3060 | return -EIO; |
3071 | p = xdr_inline_decode(xdr, *len); | 3061 | } |
3072 | if (unlikely(!p)) | 3062 | *len = ret; |
3073 | goto out_overflow; | ||
3074 | *string = (char *)p; | ||
3075 | return 0; | 3063 | return 0; |
3076 | out_overflow: | ||
3077 | print_overflow_msg(__func__, xdr); | ||
3078 | return -EIO; | ||
3079 | } | 3064 | } |
3080 | 3065 | ||
3081 | static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) | 3066 | static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) |
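decode_opaque_inline() now delegates to xdr_stream_decode_opaque_inline() (added to include/linux/sunrpc/xdr.h later in this diff) and passes NFS4_OPAQUE_LIMIT as the cap, so the length check moves inside the helper. A hedged caller sketch of the resulting contract (illustrative function name); the split between the two failure modes is visible in the rewritten function above:

    static int example_decode_name(struct xdr_stream *xdr)
    {
            unsigned int len;
            char *str;

            /* -EBADMSG (ran off the XDR buffer) is reported via
             * print_overflow_msg(); -EMSGSIZE (object larger than
             * NFS4_OPAQUE_LIMIT) is not, but both come back as -EIO. */
            if (decode_opaque_inline(xdr, &len, &str) < 0)
                    return -EIO;
            /* str points into the XDR buffer, so use or copy it before
             * decoding anything further. */
            return 0;
    }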
@@ -3142,7 +3127,7 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) | |||
3142 | } | 3127 | } |
3143 | 3128 | ||
3144 | /* Dummy routine */ | 3129 | /* Dummy routine */ |
3145 | static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp) | 3130 | static int decode_ace(struct xdr_stream *xdr, void *ace) |
3146 | { | 3131 | { |
3147 | __be32 *p; | 3132 | __be32 *p; |
3148 | unsigned int strlen; | 3133 | unsigned int strlen; |
@@ -3890,45 +3875,50 @@ out_overflow: | |||
3890 | return -EIO; | 3875 | return -EIO; |
3891 | } | 3876 | } |
3892 | 3877 | ||
3878 | static ssize_t decode_nfs4_string(struct xdr_stream *xdr, | ||
3879 | struct nfs4_string *name, gfp_t gfp_flags) | ||
3880 | { | ||
3881 | ssize_t ret; | ||
3882 | |||
3883 | ret = xdr_stream_decode_string_dup(xdr, &name->data, | ||
3884 | XDR_MAX_NETOBJ, gfp_flags); | ||
3885 | name->len = 0; | ||
3886 | if (ret > 0) | ||
3887 | name->len = ret; | ||
3888 | return ret; | ||
3889 | } | ||
3890 | |||
3893 | static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, | 3891 | static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, |
3894 | const struct nfs_server *server, kuid_t *uid, | 3892 | const struct nfs_server *server, kuid_t *uid, |
3895 | struct nfs4_string *owner_name) | 3893 | struct nfs4_string *owner_name) |
3896 | { | 3894 | { |
3897 | uint32_t len; | 3895 | ssize_t len; |
3898 | __be32 *p; | 3896 | char *p; |
3899 | int ret = 0; | ||
3900 | 3897 | ||
3901 | *uid = make_kuid(&init_user_ns, -2); | 3898 | *uid = make_kuid(&init_user_ns, -2); |
3902 | if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U))) | 3899 | if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U))) |
3903 | return -EIO; | 3900 | return -EIO; |
3904 | if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) { | 3901 | if (!(bitmap[1] & FATTR4_WORD1_OWNER)) |
3905 | p = xdr_inline_decode(xdr, 4); | 3902 | return 0; |
3906 | if (unlikely(!p)) | 3903 | bitmap[1] &= ~FATTR4_WORD1_OWNER; |
3907 | goto out_overflow; | 3904 | |
3908 | len = be32_to_cpup(p); | 3905 | if (owner_name != NULL) { |
3909 | p = xdr_inline_decode(xdr, len); | 3906 | len = decode_nfs4_string(xdr, owner_name, GFP_NOWAIT); |
3910 | if (unlikely(!p)) | 3907 | if (len <= 0) |
3911 | goto out_overflow; | 3908 | goto out; |
3912 | if (owner_name != NULL) { | 3909 | dprintk("%s: name=%s\n", __func__, owner_name->data); |
3913 | owner_name->data = kmemdup(p, len, GFP_NOWAIT); | 3910 | return NFS_ATTR_FATTR_OWNER_NAME; |
3914 | if (owner_name->data != NULL) { | 3911 | } else { |
3915 | owner_name->len = len; | 3912 | len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, |
3916 | ret = NFS_ATTR_FATTR_OWNER_NAME; | 3913 | XDR_MAX_NETOBJ); |
3917 | } | 3914 | if (len <= 0 || nfs_map_name_to_uid(server, p, len, uid) != 0) |
3918 | } else if (len < XDR_MAX_NETOBJ) { | 3915 | goto out; |
3919 | if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0) | 3916 | dprintk("%s: uid=%d\n", __func__, (int)from_kuid(&init_user_ns, *uid)); |
3920 | ret = NFS_ATTR_FATTR_OWNER; | 3917 | return NFS_ATTR_FATTR_OWNER; |
3921 | else | ||
3922 | dprintk("%s: nfs_map_name_to_uid failed!\n", | ||
3923 | __func__); | ||
3924 | } else | ||
3925 | dprintk("%s: name too long (%u)!\n", | ||
3926 | __func__, len); | ||
3927 | bitmap[1] &= ~FATTR4_WORD1_OWNER; | ||
3928 | } | 3918 | } |
3929 | dprintk("%s: uid=%d\n", __func__, (int)from_kuid(&init_user_ns, *uid)); | 3919 | out: |
3930 | return ret; | 3920 | if (len != -EBADMSG) |
3931 | out_overflow: | 3921 | return 0; |
3932 | print_overflow_msg(__func__, xdr); | 3922 | print_overflow_msg(__func__, xdr); |
3933 | return -EIO; | 3923 | return -EIO; |
3934 | } | 3924 | } |
@@ -3937,41 +3927,33 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, | |||
3937 | const struct nfs_server *server, kgid_t *gid, | 3927 | const struct nfs_server *server, kgid_t *gid, |
3938 | struct nfs4_string *group_name) | 3928 | struct nfs4_string *group_name) |
3939 | { | 3929 | { |
3940 | uint32_t len; | 3930 | ssize_t len; |
3941 | __be32 *p; | 3931 | char *p; |
3942 | int ret = 0; | ||
3943 | 3932 | ||
3944 | *gid = make_kgid(&init_user_ns, -2); | 3933 | *gid = make_kgid(&init_user_ns, -2); |
3945 | if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) | 3934 | if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) |
3946 | return -EIO; | 3935 | return -EIO; |
3947 | if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) { | 3936 | if (!(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) |
3948 | p = xdr_inline_decode(xdr, 4); | 3937 | return 0; |
3949 | if (unlikely(!p)) | 3938 | bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; |
3950 | goto out_overflow; | 3939 | |
3951 | len = be32_to_cpup(p); | 3940 | if (group_name != NULL) { |
3952 | p = xdr_inline_decode(xdr, len); | 3941 | len = decode_nfs4_string(xdr, group_name, GFP_NOWAIT); |
3953 | if (unlikely(!p)) | 3942 | if (len <= 0) |
3954 | goto out_overflow; | 3943 | goto out; |
3955 | if (group_name != NULL) { | 3944 | dprintk("%s: name=%s\n", __func__, group_name->data); |
3956 | group_name->data = kmemdup(p, len, GFP_NOWAIT); | 3945 | return NFS_ATTR_FATTR_OWNER_NAME; |
3957 | if (group_name->data != NULL) { | 3946 | } else { |
3958 | group_name->len = len; | 3947 | len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, |
3959 | ret = NFS_ATTR_FATTR_GROUP_NAME; | 3948 | XDR_MAX_NETOBJ); |
3960 | } | 3949 | if (len <= 0 || nfs_map_group_to_gid(server, p, len, gid) != 0) |
3961 | } else if (len < XDR_MAX_NETOBJ) { | 3950 | goto out; |
3962 | if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0) | 3951 | dprintk("%s: gid=%d\n", __func__, (int)from_kgid(&init_user_ns, *gid)); |
3963 | ret = NFS_ATTR_FATTR_GROUP; | 3952 | return NFS_ATTR_FATTR_GROUP; |
3964 | else | ||
3965 | dprintk("%s: nfs_map_group_to_gid failed!\n", | ||
3966 | __func__); | ||
3967 | } else | ||
3968 | dprintk("%s: name too long (%u)!\n", | ||
3969 | __func__, len); | ||
3970 | bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; | ||
3971 | } | 3953 | } |
3972 | dprintk("%s: gid=%d\n", __func__, (int)from_kgid(&init_user_ns, *gid)); | 3954 | out: |
3973 | return ret; | 3955 | if (len != -EBADMSG) |
3974 | out_overflow: | 3956 | return 0; |
3975 | print_overflow_msg(__func__, xdr); | 3957 | print_overflow_msg(__func__, xdr); |
3976 | return -EIO; | 3958 | return -EIO; |
3977 | } | 3959 | } |
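decode_attr_owner() and decode_attr_group() now share decode_nfs4_string(), which duplicates the on-the-wire name with GFP_NOWAIT and reports its length through the nfs4_string. A sketch of the contract as the two callers above use it (illustrative function name; the NUL-terminated copy is implied by the dprintk("%s") of ->data):

    static int example_owner_attr(struct xdr_stream *xdr,
                                  struct nfs4_string *owner_name)
    {
            ssize_t len = decode_nfs4_string(xdr, owner_name, GFP_NOWAIT);

            if (len < 0)
                    /* only a genuine XDR overrun is fatal; allocation
                     * failure or an oversized name just drops the attribute */
                    return len == -EBADMSG ? -EIO : 0;
            if (len == 0)
                    return 0;
            /* owner_name->data/len now hold a private copy of the name */
            return NFS_ATTR_FATTR_OWNER_NAME;
    }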
@@ -4294,15 +4276,12 @@ out_overflow: | |||
4294 | 4276 | ||
4295 | static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len) | 4277 | static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len) |
4296 | { | 4278 | { |
4297 | __be32 *p; | 4279 | ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len); |
4298 | 4280 | if (unlikely(ret < 0)) { | |
4299 | p = xdr_inline_decode(xdr, len); | 4281 | print_overflow_msg(__func__, xdr); |
4300 | if (likely(p)) { | 4282 | return -EIO; |
4301 | memcpy(buf, p, len); | ||
4302 | return 0; | ||
4303 | } | 4283 | } |
4304 | print_overflow_msg(__func__, xdr); | 4284 | return 0; |
4305 | return -EIO; | ||
4306 | } | 4285 | } |
4307 | 4286 | ||
4308 | static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) | 4287 | static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) |
@@ -5093,7 +5072,7 @@ static int decode_rw_delegation(struct xdr_stream *xdr, | |||
5093 | if (decode_space_limit(xdr, &res->pagemod_limit) < 0) | 5072 | if (decode_space_limit(xdr, &res->pagemod_limit) < 0) |
5094 | return -EIO; | 5073 | return -EIO; |
5095 | } | 5074 | } |
5096 | return decode_ace(xdr, NULL, res->server->nfs_client); | 5075 | return decode_ace(xdr, NULL); |
5097 | out_overflow: | 5076 | out_overflow: |
5098 | print_overflow_msg(__func__, xdr); | 5077 | print_overflow_msg(__func__, xdr); |
5099 | return -EIO; | 5078 | return -EIO; |
@@ -5660,8 +5639,6 @@ static int decode_exchange_id(struct xdr_stream *xdr, | |||
5660 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); | 5639 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); |
5661 | if (unlikely(status)) | 5640 | if (unlikely(status)) |
5662 | return status; | 5641 | return status; |
5663 | if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) | ||
5664 | return -EIO; | ||
5665 | memcpy(res->server_owner->major_id, dummy_str, dummy); | 5642 | memcpy(res->server_owner->major_id, dummy_str, dummy); |
5666 | res->server_owner->major_id_sz = dummy; | 5643 | res->server_owner->major_id_sz = dummy; |
5667 | 5644 | ||
@@ -5669,8 +5646,6 @@ static int decode_exchange_id(struct xdr_stream *xdr, | |||
5669 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); | 5646 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); |
5670 | if (unlikely(status)) | 5647 | if (unlikely(status)) |
5671 | return status; | 5648 | return status; |
5672 | if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) | ||
5673 | return -EIO; | ||
5674 | memcpy(res->server_scope->server_scope, dummy_str, dummy); | 5649 | memcpy(res->server_scope->server_scope, dummy_str, dummy); |
5675 | res->server_scope->server_scope_sz = dummy; | 5650 | res->server_scope->server_scope_sz = dummy; |
5676 | 5651 | ||
@@ -5685,16 +5660,12 @@ static int decode_exchange_id(struct xdr_stream *xdr, | |||
5685 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); | 5660 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); |
5686 | if (unlikely(status)) | 5661 | if (unlikely(status)) |
5687 | return status; | 5662 | return status; |
5688 | if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) | ||
5689 | return -EIO; | ||
5690 | memcpy(res->impl_id->domain, dummy_str, dummy); | 5663 | memcpy(res->impl_id->domain, dummy_str, dummy); |
5691 | 5664 | ||
5692 | /* nii_name */ | 5665 | /* nii_name */ |
5693 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); | 5666 | status = decode_opaque_inline(xdr, &dummy, &dummy_str); |
5694 | if (unlikely(status)) | 5667 | if (unlikely(status)) |
5695 | return status; | 5668 | return status; |
5696 | if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) | ||
5697 | return -EIO; | ||
5698 | memcpy(res->impl_id->name, dummy_str, dummy); | 5669 | memcpy(res->impl_id->name, dummy_str, dummy); |
5699 | 5670 | ||
5700 | /* nii_date */ | 5671 | /* nii_date */ |
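The NFS4_OPAQUE_LIMIT checks removed above became redundant once decode_opaque_inline() started enforcing that limit internally (see the nfs4xdr.c hunk earlier in this diff): an oversized owner, scope or implementation string now fails inside the decode call and never reaches the memcpy(). Annotated excerpt, with the destination sizes stated as an assumption since the struct definitions are outside this diff:

    /* dummy <= NFS4_OPAQUE_LIMIT is guaranteed by decode_opaque_inline();
     * major_id[], server_scope[] and the impl_id strings are assumed to be
     * at least NFS4_OPAQUE_LIMIT bytes, so the copies stay in bounds. */
    memcpy(res->server_owner->major_id, dummy_str, dummy);
    res->server_owner->major_id_sz = dummy;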
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 6bca17883b93..54e0f9f2dd94 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -531,39 +531,32 @@ static void nfs_show_mountd_netid(struct seq_file *m, struct nfs_server *nfss, | |||
531 | int showdefaults) | 531 | int showdefaults) |
532 | { | 532 | { |
533 | struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address; | 533 | struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address; |
534 | char *proto = NULL; | ||
534 | 535 | ||
535 | seq_printf(m, ",mountproto="); | ||
536 | switch (sap->sa_family) { | 536 | switch (sap->sa_family) { |
537 | case AF_INET: | 537 | case AF_INET: |
538 | switch (nfss->mountd_protocol) { | 538 | switch (nfss->mountd_protocol) { |
539 | case IPPROTO_UDP: | 539 | case IPPROTO_UDP: |
540 | seq_printf(m, RPCBIND_NETID_UDP); | 540 | proto = RPCBIND_NETID_UDP; |
541 | break; | 541 | break; |
542 | case IPPROTO_TCP: | 542 | case IPPROTO_TCP: |
543 | seq_printf(m, RPCBIND_NETID_TCP); | 543 | proto = RPCBIND_NETID_TCP; |
544 | break; | 544 | break; |
545 | default: | ||
546 | if (showdefaults) | ||
547 | seq_printf(m, "auto"); | ||
548 | } | 545 | } |
549 | break; | 546 | break; |
550 | case AF_INET6: | 547 | case AF_INET6: |
551 | switch (nfss->mountd_protocol) { | 548 | switch (nfss->mountd_protocol) { |
552 | case IPPROTO_UDP: | 549 | case IPPROTO_UDP: |
553 | seq_printf(m, RPCBIND_NETID_UDP6); | 550 | proto = RPCBIND_NETID_UDP6; |
554 | break; | 551 | break; |
555 | case IPPROTO_TCP: | 552 | case IPPROTO_TCP: |
556 | seq_printf(m, RPCBIND_NETID_TCP6); | 553 | proto = RPCBIND_NETID_TCP6; |
557 | break; | 554 | break; |
558 | default: | ||
559 | if (showdefaults) | ||
560 | seq_printf(m, "auto"); | ||
561 | } | 555 | } |
562 | break; | 556 | break; |
563 | default: | ||
564 | if (showdefaults) | ||
565 | seq_printf(m, "auto"); | ||
566 | } | 557 | } |
558 | if (proto || showdefaults) | ||
559 | seq_printf(m, ",mountproto=%s", proto ?: "auto"); | ||
567 | } | 560 | } |
568 | 561 | ||
569 | static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, | 562 | static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, |
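The rewritten nfs_show_mountd_netid() only touches the seq_file once it knows it has something to print, so an unrecognised address family with showdefaults == 0 no longer leaves a dangling ",mountproto=" in the emitted mount options. The proto ?: "auto" expression is GCC's conditional operator with the middle operand omitted, which the kernel uses routinely; a minimal illustration (helper name illustrative):

    static const char *example_netid(const char *proto)
    {
            /* GNU C: a ?: b  is  a ? a : b, with a evaluated only once */
            return proto ?: "auto";
    }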
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 006068526542..e75b056f46f4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1785,7 +1785,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) | |||
1785 | if (status < 0) { | 1785 | if (status < 0) { |
1786 | nfs_context_set_write_error(req->wb_context, status); | 1786 | nfs_context_set_write_error(req->wb_context, status); |
1787 | nfs_inode_remove_request(req); | 1787 | nfs_inode_remove_request(req); |
1788 | dprintk(", error = %d\n", status); | 1788 | dprintk_cont(", error = %d\n", status); |
1789 | goto next; | 1789 | goto next; |
1790 | } | 1790 | } |
1791 | 1791 | ||
@@ -1794,11 +1794,11 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) | |||
1794 | if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { | 1794 | if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { |
1795 | /* We have a match */ | 1795 | /* We have a match */ |
1796 | nfs_inode_remove_request(req); | 1796 | nfs_inode_remove_request(req); |
1797 | dprintk(" OK\n"); | 1797 | dprintk_cont(" OK\n"); |
1798 | goto next; | 1798 | goto next; |
1799 | } | 1799 | } |
1800 | /* We have a mismatch. Write the page again */ | 1800 | /* We have a mismatch. Write the page again */ |
1801 | dprintk(" mismatch\n"); | 1801 | dprintk_cont(" mismatch\n"); |
1802 | nfs_mark_request_dirty(req); | 1802 | nfs_mark_request_dirty(req); |
1803 | set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); | 1803 | set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); |
1804 | next: | 1804 | next: |
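nfs_commit_release_pages() builds one diagnostic line out of several dprintk() calls; switching the trailing pieces to dprintk_cont() (added in the sunrpc debug.h hunk later in this diff) marks them KERN_CONT, so printk() keeps them on the same line instead of starting a new one. A minimal usage sketch with a hypothetical format string and value:

    dprintk("NFS: commit of request %p", req);   /* opens the line (KERN_DEFAULT) */
    dprintk_cont(" OK\n");                       /* appends to it (KERN_CONT) */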
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c15373894a42..b37dee3acaba 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) | |||
355 | static inline int nlm_compare_locks(const struct file_lock *fl1, | 355 | static inline int nlm_compare_locks(const struct file_lock *fl1, |
356 | const struct file_lock *fl2) | 356 | const struct file_lock *fl2) |
357 | { | 357 | { |
358 | return fl1->fl_pid == fl2->fl_pid | 358 | return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) |
359 | && fl1->fl_pid == fl2->fl_pid | ||
359 | && fl1->fl_owner == fl2->fl_owner | 360 | && fl1->fl_owner == fl2->fl_owner |
360 | && fl1->fl_start == fl2->fl_start | 361 | && fl1->fl_start == fl2->fl_start |
361 | && fl1->fl_end == fl2->fl_end | 362 | && fl1->fl_end == fl2->fl_end |
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index b1bc62ba20a2..8fd3504946ad 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | #define UNX_MAXNODENAME __NEW_UTS_LEN | 33 | #define UNX_MAXNODENAME __NEW_UTS_LEN |
34 | #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) | 34 | #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) |
35 | #define UNX_NGROUPS 16 | ||
35 | 36 | ||
36 | struct rpcsec_gss_info; | 37 | struct rpcsec_gss_info; |
37 | 38 | ||
@@ -63,9 +64,6 @@ struct rpc_cred { | |||
63 | struct rcu_head cr_rcu; | 64 | struct rcu_head cr_rcu; |
64 | struct rpc_auth * cr_auth; | 65 | struct rpc_auth * cr_auth; |
65 | const struct rpc_credops *cr_ops; | 66 | const struct rpc_credops *cr_ops; |
66 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
67 | unsigned long cr_magic; /* 0x0f4aa4f0 */ | ||
68 | #endif | ||
69 | unsigned long cr_expire; /* when to gc */ | 67 | unsigned long cr_expire; /* when to gc */ |
70 | unsigned long cr_flags; /* various flags */ | 68 | unsigned long cr_flags; /* various flags */ |
71 | atomic_t cr_count; /* ref count */ | 69 | atomic_t cr_count; /* ref count */ |
@@ -79,8 +77,6 @@ struct rpc_cred { | |||
79 | #define RPCAUTH_CRED_HASHED 2 | 77 | #define RPCAUTH_CRED_HASHED 2 |
80 | #define RPCAUTH_CRED_NEGATIVE 3 | 78 | #define RPCAUTH_CRED_NEGATIVE 3 |
81 | 79 | ||
82 | #define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 | ||
83 | |||
84 | /* rpc_auth au_flags */ | 80 | /* rpc_auth au_flags */ |
85 | #define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ | 81 | #define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ |
86 | 82 | ||
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 20d157a518a7..270bad0e1bed 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h | |||
@@ -63,15 +63,6 @@ struct cache_head { | |||
63 | 63 | ||
64 | #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ | 64 | #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ |
65 | 65 | ||
66 | struct cache_detail_procfs { | ||
67 | struct proc_dir_entry *proc_ent; | ||
68 | struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; | ||
69 | }; | ||
70 | |||
71 | struct cache_detail_pipefs { | ||
72 | struct dentry *dir; | ||
73 | }; | ||
74 | |||
75 | struct cache_detail { | 66 | struct cache_detail { |
76 | struct module * owner; | 67 | struct module * owner; |
77 | int hash_size; | 68 | int hash_size; |
@@ -123,9 +114,9 @@ struct cache_detail { | |||
123 | time_t last_warn; /* when we last warned about no readers */ | 114 | time_t last_warn; /* when we last warned about no readers */ |
124 | 115 | ||
125 | union { | 116 | union { |
126 | struct cache_detail_procfs procfs; | 117 | struct proc_dir_entry *procfs; |
127 | struct cache_detail_pipefs pipefs; | 118 | struct dentry *pipefs; |
128 | } u; | 119 | }; |
129 | struct net *net; | 120 | struct net *net; |
130 | }; | 121 | }; |
131 | 122 | ||
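With the wrapper structs gone, the union is anonymous and the per-transport handles hang directly off struct cache_detail; the old two-level names below come from the definitions deleted above, the new ones from the replacement union. A trivial before/after sketch (illustrative helper name; the real users live in net/sunrpc/cache.c, outside this excerpt):

    /* before: cd->u.procfs.proc_ent    and  cd->u.pipefs.dir
     * after:  cd->procfs               and  cd->pipefs       */
    static bool example_cache_has_procfs(const struct cache_detail *cd)
    {
            return cd->procfs != NULL;
    }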
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 333ad11b3dd9..6095ecba0dde 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -182,7 +182,6 @@ int rpc_protocol(struct rpc_clnt *); | |||
182 | struct net * rpc_net_ns(struct rpc_clnt *); | 182 | struct net * rpc_net_ns(struct rpc_clnt *); |
183 | size_t rpc_max_payload(struct rpc_clnt *); | 183 | size_t rpc_max_payload(struct rpc_clnt *); |
184 | size_t rpc_max_bc_payload(struct rpc_clnt *); | 184 | size_t rpc_max_bc_payload(struct rpc_clnt *); |
185 | unsigned long rpc_get_timeout(struct rpc_clnt *clnt); | ||
186 | void rpc_force_rebind(struct rpc_clnt *); | 185 | void rpc_force_rebind(struct rpc_clnt *); |
187 | size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); | 186 | size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); |
188 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); | 187 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); |
@@ -202,8 +201,9 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, | |||
202 | struct rpc_xprt *, | 201 | struct rpc_xprt *, |
203 | void *), | 202 | void *), |
204 | void *data); | 203 | void *data); |
205 | void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, | 204 | void rpc_set_connect_timeout(struct rpc_clnt *clnt, |
206 | unsigned long timeo); | 205 | unsigned long connect_timeout, |
206 | unsigned long reconnect_timeout); | ||
207 | 207 | ||
208 | int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, | 208 | int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, |
209 | struct rpc_xprt_switch *, | 209 | struct rpc_xprt_switch *, |
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 59a7889e15db..8da0f37f3bdc 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h | |||
@@ -20,33 +20,55 @@ extern unsigned int nfsd_debug; | |||
20 | extern unsigned int nlm_debug; | 20 | extern unsigned int nlm_debug; |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define dprintk(args...) dfprintk(FACILITY, ## args) | 23 | #define dprintk(fmt, ...) \ |
24 | #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) | 24 | dfprintk(FACILITY, fmt, ##__VA_ARGS__) |
25 | #define dprintk_cont(fmt, ...) \ | ||
26 | dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) | ||
27 | #define dprintk_rcu(fmt, ...) \ | ||
28 | dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) | ||
29 | #define dprintk_rcu_cont(fmt, ...) \ | ||
30 | dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) | ||
25 | 31 | ||
26 | #undef ifdebug | 32 | #undef ifdebug |
27 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 33 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
28 | # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) | 34 | # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) |
29 | 35 | ||
30 | # define dfprintk(fac, args...) \ | 36 | # define dfprintk(fac, fmt, ...) \ |
31 | do { \ | 37 | do { \ |
32 | ifdebug(fac) \ | 38 | ifdebug(fac) \ |
33 | printk(KERN_DEFAULT args); \ | 39 | printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ |
34 | } while (0) | 40 | } while (0) |
35 | 41 | ||
36 | # define dfprintk_rcu(fac, args...) \ | 42 | # define dfprintk_cont(fac, fmt, ...) \ |
37 | do { \ | 43 | do { \ |
38 | ifdebug(fac) { \ | 44 | ifdebug(fac) \ |
39 | rcu_read_lock(); \ | 45 | printk(KERN_CONT fmt, ##__VA_ARGS__); \ |
40 | printk(KERN_DEFAULT args); \ | 46 | } while (0) |
41 | rcu_read_unlock(); \ | 47 | |
42 | } \ | 48 | # define dfprintk_rcu(fac, fmt, ...) \ |
43 | } while (0) | 49 | do { \ |
50 | ifdebug(fac) { \ | ||
51 | rcu_read_lock(); \ | ||
52 | printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ | ||
53 | rcu_read_unlock(); \ | ||
54 | } \ | ||
55 | } while (0) | ||
56 | |||
57 | # define dfprintk_rcu_cont(fac, fmt, ...) \ | ||
58 | do { \ | ||
59 | ifdebug(fac) { \ | ||
60 | rcu_read_lock(); \ | ||
61 | printk(KERN_CONT fmt, ##__VA_ARGS__); \ | ||
62 | rcu_read_unlock(); \ | ||
63 | } \ | ||
64 | } while (0) | ||
44 | 65 | ||
45 | # define RPC_IFDEBUG(x) x | 66 | # define RPC_IFDEBUG(x) x |
46 | #else | 67 | #else |
47 | # define ifdebug(fac) if (0) | 68 | # define ifdebug(fac) if (0) |
48 | # define dfprintk(fac, args...) do {} while (0) | 69 | # define dfprintk(fac, fmt, ...) do {} while (0) |
49 | # define dfprintk_rcu(fac, args...) do {} while (0) | 70 | # define dfprintk_cont(fac, fmt, ...) do {} while (0) |
71 | # define dfprintk_rcu(fac, fmt, ...) do {} while (0) | ||
50 | # define RPC_IFDEBUG(x) | 72 | # define RPC_IFDEBUG(x) |
51 | #endif | 73 | #endif |
52 | 74 | ||
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 56c48c884a24..054c8cde18f3 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
@@ -242,6 +242,185 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); | |||
242 | extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); | 242 | extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); |
243 | extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); | 243 | extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); |
244 | 244 | ||
245 | ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, | ||
246 | size_t maxlen, gfp_t gfp_flags); | ||
247 | /** | ||
248 | * xdr_align_size - Calculate padded size of an object | ||
249 | * @n: Size of an object being XDR encoded (in bytes) | ||
250 | * | ||
251 | * Return value: | ||
252 | * Size (in bytes) of the object including xdr padding | ||
253 | */ | ||
254 | static inline size_t | ||
255 | xdr_align_size(size_t n) | ||
256 | { | ||
257 | const size_t mask = sizeof(__u32) - 1; | ||
258 | |||
259 | return (n + mask) & ~mask; | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * xdr_stream_encode_u32 - Encode a 32-bit integer | ||
264 | * @xdr: pointer to xdr_stream | ||
265 | * @n: integer to encode | ||
266 | * | ||
267 | * Return values: | ||
268 | * On success, returns length in bytes of XDR buffer consumed | ||
269 | * %-EMSGSIZE on XDR buffer overflow | ||
270 | */ | ||
271 | static inline ssize_t | ||
272 | xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) | ||
273 | { | ||
274 | const size_t len = sizeof(n); | ||
275 | __be32 *p = xdr_reserve_space(xdr, len); | ||
276 | |||
277 | if (unlikely(!p)) | ||
278 | return -EMSGSIZE; | ||
279 | *p = cpu_to_be32(n); | ||
280 | return len; | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * xdr_stream_encode_u64 - Encode a 64-bit integer | ||
285 | * @xdr: pointer to xdr_stream | ||
286 | * @n: 64-bit integer to encode | ||
287 | * | ||
288 | * Return values: | ||
289 | * On success, returns length in bytes of XDR buffer consumed | ||
290 | * %-EMSGSIZE on XDR buffer overflow | ||
291 | */ | ||
292 | static inline ssize_t | ||
293 | xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) | ||
294 | { | ||
295 | const size_t len = sizeof(n); | ||
296 | __be32 *p = xdr_reserve_space(xdr, len); | ||
297 | |||
298 | if (unlikely(!p)) | ||
299 | return -EMSGSIZE; | ||
300 | xdr_encode_hyper(p, n); | ||
301 | return len; | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data | ||
306 | * @xdr: pointer to xdr_stream | ||
307 | * @ptr: pointer to opaque data object | ||
308 | * @len: size of object pointed to by @ptr | ||
309 | * | ||
310 | * Return values: | ||
311 | * On success, returns length in bytes of XDR buffer consumed | ||
312 | * %-EMSGSIZE on XDR buffer overflow | ||
313 | */ | ||
314 | static inline ssize_t | ||
315 | xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) | ||
316 | { | ||
317 | __be32 *p = xdr_reserve_space(xdr, len); | ||
318 | |||
319 | if (unlikely(!p)) | ||
320 | return -EMSGSIZE; | ||
321 | xdr_encode_opaque_fixed(p, ptr, len); | ||
322 | return xdr_align_size(len); | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * xdr_stream_encode_opaque - Encode variable length opaque xdr data | ||
327 | * @xdr: pointer to xdr_stream | ||
328 | * @ptr: pointer to opaque data object | ||
329 | * @len: size of object pointed to by @ptr | ||
330 | * | ||
331 | * Return values: | ||
332 | * On success, returns length in bytes of XDR buffer consumed | ||
333 | * %-EMSGSIZE on XDR buffer overflow | ||
334 | */ | ||
335 | static inline ssize_t | ||
336 | xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) | ||
337 | { | ||
338 | size_t count = sizeof(__u32) + xdr_align_size(len); | ||
339 | __be32 *p = xdr_reserve_space(xdr, count); | ||
340 | |||
341 | if (unlikely(!p)) | ||
342 | return -EMSGSIZE; | ||
343 | xdr_encode_opaque(p, ptr, len); | ||
344 | return count; | ||
345 | } | ||
346 | |||
347 | /** | ||
348 | * xdr_stream_decode_u32 - Decode a 32-bit integer | ||
349 | * @xdr: pointer to xdr_stream | ||
350 | * @ptr: location to store integer | ||
351 | * | ||
352 | * Return values: | ||
353 | * %0 on success | ||
354 | * %-EBADMSG on XDR buffer overflow | ||
355 | */ | ||
356 | static inline ssize_t | ||
357 | xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) | ||
358 | { | ||
359 | const size_t count = sizeof(*ptr); | ||
360 | __be32 *p = xdr_inline_decode(xdr, count); | ||
361 | |||
362 | if (unlikely(!p)) | ||
363 | return -EBADMSG; | ||
364 | *ptr = be32_to_cpup(p); | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data | ||
370 | * @xdr: pointer to xdr_stream | ||
371 | * @ptr: location to store data | ||
372 | * @len: size of buffer pointed to by @ptr | ||
373 | * | ||
374 | * Return values: | ||
375 | * On success, returns size of object stored in @ptr | ||
376 | * %-EBADMSG on XDR buffer overflow | ||
377 | */ | ||
378 | static inline ssize_t | ||
379 | xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) | ||
380 | { | ||
381 | __be32 *p = xdr_inline_decode(xdr, len); | ||
382 | |||
383 | if (unlikely(!p)) | ||
384 | return -EBADMSG; | ||
385 | xdr_decode_opaque_fixed(p, ptr, len); | ||
386 | return len; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data | ||
391 | * @xdr: pointer to xdr_stream | ||
392 | * @ptr: location to store pointer to opaque data | ||
393 | * @maxlen: maximum acceptable object size | ||
394 | * | ||
395 | * Note: the pointer stored in @ptr cannot be assumed valid after the XDR | ||
396 | * buffer has been destroyed, or even after calling xdr_inline_decode() | ||
397 | * on @xdr. It is therefore expected that the object it points to should | ||
398 | * be processed immediately. | ||
399 | * | ||
400 | * Return values: | ||
401 | * On success, returns size of object stored in *@ptr | ||
402 | * %-EBADMSG on XDR buffer overflow | ||
403 | * %-EMSGSIZE if the size of the object would exceed @maxlen | ||
404 | */ | ||
405 | static inline ssize_t | ||
406 | xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) | ||
407 | { | ||
408 | __be32 *p; | ||
409 | __u32 len; | ||
410 | |||
411 | *ptr = NULL; | ||
412 | if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) | ||
413 | return -EBADMSG; | ||
414 | if (len != 0) { | ||
415 | p = xdr_inline_decode(xdr, len); | ||
416 | if (unlikely(!p)) | ||
417 | return -EBADMSG; | ||
418 | if (unlikely(len > maxlen)) | ||
419 | return -EMSGSIZE; | ||
420 | *ptr = p; | ||
421 | } | ||
422 | return len; | ||
423 | } | ||
245 | #endif /* __KERNEL__ */ | 424 | #endif /* __KERNEL__ */ |
246 | 425 | ||
247 | #endif /* _SUNRPC_XDR_H_ */ | 426 | #endif /* _SUNRPC_XDR_H_ */ |
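All of the new xdr_stream generics follow one convention: the encode helpers return the number of XDR bytes consumed (or -EMSGSIZE), the decode helpers return 0 or the object size on success (or -EBADMSG/-EMSGSIZE), which is what lets the nfs4xdr.c wrappers earlier in this diff collapse to one-liners. A self-contained round-trip sketch, assuming the 4.11-era three-argument xdr_init_encode()/xdr_init_decode() and a caller-supplied page of memory (function name illustrative):

    /* Sketch: encode a u32 and an opaque into one page, then decode them. */
    static int example_xdr_roundtrip(void *page_mem)    /* PAGE_SIZE bytes */
    {
            struct xdr_buf buf;
            struct xdr_stream xdr;
            __u32 value;
            char *name;
            ssize_t len;

            memset(&buf, 0, sizeof(buf));
            buf.head[0].iov_base = page_mem;    /* iov_len and buf.len start at 0 */
            buf.buflen = PAGE_SIZE;             /* capacity of the stream */

            xdr_init_encode(&xdr, &buf, page_mem);
            if (xdr_stream_encode_u32(&xdr, 42) < 0)
                    return -EMSGSIZE;
            if (xdr_stream_encode_opaque(&xdr, "hello", 5) < 0)
                    return -EMSGSIZE;           /* 4 + (4 + 8) bytes consumed so far */

            /* buf.len was advanced by the encode side, so decode just works. */
            xdr_init_decode(&xdr, &buf, page_mem);
            if (xdr_stream_decode_u32(&xdr, &value) < 0)
                    return -EBADMSG;            /* value == 42 */
            len = xdr_stream_decode_opaque_inline(&xdr, (void **)&name, 16);
            if (len < 0)
                    return len;                 /* -EBADMSG overrun, -EMSGSIZE if > 16 */
            /* name points at "hello" inside the buffer and len == 5 */
            return 0;
    }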
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a5da60b24d83..eab1c749e192 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -137,6 +137,9 @@ struct rpc_xprt_ops { | |||
137 | void (*release_request)(struct rpc_task *task); | 137 | void (*release_request)(struct rpc_task *task); |
138 | void (*close)(struct rpc_xprt *xprt); | 138 | void (*close)(struct rpc_xprt *xprt); |
139 | void (*destroy)(struct rpc_xprt *xprt); | 139 | void (*destroy)(struct rpc_xprt *xprt); |
140 | void (*set_connect_timeout)(struct rpc_xprt *xprt, | ||
141 | unsigned long connect_timeout, | ||
142 | unsigned long reconnect_timeout); | ||
140 | void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); | 143 | void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); |
141 | int (*enable_swap)(struct rpc_xprt *xprt); | 144 | int (*enable_swap)(struct rpc_xprt *xprt); |
142 | void (*disable_swap)(struct rpc_xprt *xprt); | 145 | void (*disable_swap)(struct rpc_xprt *xprt); |
@@ -221,6 +224,7 @@ struct rpc_xprt { | |||
221 | struct timer_list timer; | 224 | struct timer_list timer; |
222 | unsigned long last_used, | 225 | unsigned long last_used, |
223 | idle_timeout, | 226 | idle_timeout, |
227 | connect_timeout, | ||
224 | max_reconnect_timeout; | 228 | max_reconnect_timeout; |
225 | 229 | ||
226 | /* | 230 | /* |
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index bef3fb0abb8f..c9959d7e3579 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
@@ -55,6 +55,8 @@ struct sock_xprt { | |||
55 | size_t rcvsize, | 55 | size_t rcvsize, |
56 | sndsize; | 56 | sndsize; |
57 | 57 | ||
58 | struct rpc_timeout tcp_timeout; | ||
59 | |||
58 | /* | 60 | /* |
59 | * Saved socket callback addresses | 61 | * Saved socket callback addresses |
60 | */ | 62 | */ |
@@ -81,6 +83,7 @@ struct sock_xprt { | |||
81 | 83 | ||
82 | #define XPRT_SOCK_CONNECTING 1U | 84 | #define XPRT_SOCK_CONNECTING 1U |
83 | #define XPRT_SOCK_DATA_READY (2) | 85 | #define XPRT_SOCK_DATA_READY (2) |
86 | #define XPRT_SOCK_UPD_TIMEOUT (3) | ||
84 | 87 | ||
85 | #endif /* __KERNEL__ */ | 88 | #endif /* __KERNEL__ */ |
86 | 89 | ||
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 2bff63a73cf8..a1ee933e3029 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -464,8 +464,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) | |||
464 | * Note that the cred_unused list must be time-ordered. | 464 | * Note that the cred_unused list must be time-ordered. |
465 | */ | 465 | */ |
466 | if (time_in_range(cred->cr_expire, expired, jiffies) && | 466 | if (time_in_range(cred->cr_expire, expired, jiffies) && |
467 | test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) | 467 | test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { |
468 | freed = SHRINK_STOP; | ||
468 | break; | 469 | break; |
470 | } | ||
469 | 471 | ||
470 | list_del_init(&cred->cr_lru); | 472 | list_del_init(&cred->cr_lru); |
471 | number_cred_unused--; | 473 | number_cred_unused--; |
@@ -520,7 +522,7 @@ static unsigned long | |||
520 | rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | 522 | rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
521 | 523 | ||
522 | { | 524 | { |
523 | return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; | 525 | return number_cred_unused * sysctl_vfs_cache_pressure / 100; |
524 | } | 526 | } |
525 | 527 | ||
526 | static void | 528 | static void |
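The reordering in rpcauth_cache_shrink_count() matters because this is integer arithmetic: dividing first discards everything below one hundred unused credentials. A worked example with the default vfs_cache_pressure of 100:

    /* number_cred_unused = 64, sysctl_vfs_cache_pressure = 100
     *
     *   old:  (64 / 100) * 100  == 0    -> the shrinker thinks there is
     *                                      nothing to reclaim
     *   new:   64 * 100 / 100   == 64   -> small credential caches are
     *                                      still visible to the shrinker
     */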
@@ -646,9 +648,6 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, | |||
646 | cred->cr_auth = auth; | 648 | cred->cr_auth = auth; |
647 | cred->cr_ops = ops; | 649 | cred->cr_ops = ops; |
648 | cred->cr_expire = jiffies; | 650 | cred->cr_expire = jiffies; |
649 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
650 | cred->cr_magic = RPCAUTH_CRED_MAGIC; | ||
651 | #endif | ||
652 | cred->cr_uid = acred->uid; | 651 | cred->cr_uid = acred->uid; |
653 | } | 652 | } |
654 | EXPORT_SYMBOL_GPL(rpcauth_init_cred); | 653 | EXPORT_SYMBOL_GPL(rpcauth_init_cred); |
@@ -876,8 +875,12 @@ int __init rpcauth_init_module(void) | |||
876 | err = rpc_init_generic_auth(); | 875 | err = rpc_init_generic_auth(); |
877 | if (err < 0) | 876 | if (err < 0) |
878 | goto out2; | 877 | goto out2; |
879 | register_shrinker(&rpc_cred_shrinker); | 878 | err = register_shrinker(&rpc_cred_shrinker); |
879 | if (err < 0) | ||
880 | goto out3; | ||
880 | return 0; | 881 | return 0; |
882 | out3: | ||
883 | rpc_destroy_generic_auth(); | ||
881 | out2: | 884 | out2: |
882 | rpc_destroy_authunix(); | 885 | rpc_destroy_authunix(); |
883 | out1: | 886 | out1: |
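For reference, the shape of the init path after this hunk: register_shrinker() can fail, so its return value is now checked and earlier setup is unwound through the new out3 label. A minimal, hedged sketch of the same register/unregister pairing in an arbitrary module, written against the register_shrinker() signature used in this tree; all names here are illustrative:

#include <linux/module.h>
#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;			/* nothing reclaimable in this sketch */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	int err = register_shrinker(&demo_shrinker);

	if (err < 0)
		return err;	/* unwind any earlier setup before returning */
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");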
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 4d17376b2acb..5f3d527dff65 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c | |||
@@ -139,7 +139,4 @@ struct rpc_cred null_cred = { | |||
139 | .cr_ops = &null_credops, | 139 | .cr_ops = &null_credops, |
140 | .cr_count = ATOMIC_INIT(1), | 140 | .cr_count = ATOMIC_INIT(1), |
141 | .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, | 141 | .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, |
142 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
143 | .cr_magic = RPCAUTH_CRED_MAGIC, | ||
144 | #endif | ||
145 | }; | 142 | }; |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 306fc0f54596..82337e1ec9cd 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -14,12 +14,10 @@ | |||
14 | #include <linux/sunrpc/auth.h> | 14 | #include <linux/sunrpc/auth.h> |
15 | #include <linux/user_namespace.h> | 15 | #include <linux/user_namespace.h> |
16 | 16 | ||
17 | #define NFS_NGROUPS 16 | ||
18 | |||
19 | struct unx_cred { | 17 | struct unx_cred { |
20 | struct rpc_cred uc_base; | 18 | struct rpc_cred uc_base; |
21 | kgid_t uc_gid; | 19 | kgid_t uc_gid; |
22 | kgid_t uc_gids[NFS_NGROUPS]; | 20 | kgid_t uc_gids[UNX_NGROUPS]; |
23 | }; | 21 | }; |
24 | #define uc_uid uc_base.cr_uid | 22 | #define uc_uid uc_base.cr_uid |
25 | 23 | ||
@@ -82,13 +80,13 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t | |||
82 | 80 | ||
83 | if (acred->group_info != NULL) | 81 | if (acred->group_info != NULL) |
84 | groups = acred->group_info->ngroups; | 82 | groups = acred->group_info->ngroups; |
85 | if (groups > NFS_NGROUPS) | 83 | if (groups > UNX_NGROUPS) |
86 | groups = NFS_NGROUPS; | 84 | groups = UNX_NGROUPS; |
87 | 85 | ||
88 | cred->uc_gid = acred->gid; | 86 | cred->uc_gid = acred->gid; |
89 | for (i = 0; i < groups; i++) | 87 | for (i = 0; i < groups; i++) |
90 | cred->uc_gids[i] = acred->group_info->gid[i]; | 88 | cred->uc_gids[i] = acred->group_info->gid[i]; |
91 | if (i < NFS_NGROUPS) | 89 | if (i < UNX_NGROUPS) |
92 | cred->uc_gids[i] = INVALID_GID; | 90 | cred->uc_gids[i] = INVALID_GID; |
93 | 91 | ||
94 | return &cred->uc_base; | 92 | return &cred->uc_base; |
@@ -132,12 +130,12 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) | |||
132 | 130 | ||
133 | if (acred->group_info != NULL) | 131 | if (acred->group_info != NULL) |
134 | groups = acred->group_info->ngroups; | 132 | groups = acred->group_info->ngroups; |
135 | if (groups > NFS_NGROUPS) | 133 | if (groups > UNX_NGROUPS) |
136 | groups = NFS_NGROUPS; | 134 | groups = UNX_NGROUPS; |
137 | for (i = 0; i < groups ; i++) | 135 | for (i = 0; i < groups ; i++) |
138 | if (!gid_eq(cred->uc_gids[i], acred->group_info->gid[i])) | 136 | if (!gid_eq(cred->uc_gids[i], acred->group_info->gid[i])) |
139 | return 0; | 137 | return 0; |
140 | if (groups < NFS_NGROUPS && gid_valid(cred->uc_gids[groups])) | 138 | if (groups < UNX_NGROUPS && gid_valid(cred->uc_gids[groups])) |
141 | return 0; | 139 | return 0; |
142 | return 1; | 140 | return 1; |
143 | } | 141 | } |
@@ -166,7 +164,7 @@ unx_marshal(struct rpc_task *task, __be32 *p) | |||
166 | *p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid)); | 164 | *p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid)); |
167 | *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid)); | 165 | *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid)); |
168 | hold = p++; | 166 | hold = p++; |
169 | for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++) | 167 | for (i = 0; i < UNX_NGROUPS && gid_valid(cred->uc_gids[i]); i++) |
170 | *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i])); | 168 | *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i])); |
171 | *hold = htonl(p - hold - 1); /* gid array length */ | 169 | *hold = htonl(p - hold - 1); /* gid array length */ |
172 | *base = htonl((p - base - 1) << 2); /* cred length */ | 170 | *base = htonl((p - base - 1) << 2); /* cred length */ |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index d8639da06d9c..79d55d949d9a 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -728,7 +728,7 @@ void cache_clean_deferred(void *owner) | |||
728 | /* | 728 | /* |
729 | * communicate with user-space | 729 | * communicate with user-space |
730 | * | 730 | * |
731 | * We have a magic /proc file - /proc/sunrpc/<cachename>/channel. | 731 | * We have a magic /proc file - /proc/net/rpc/<cachename>/channel. |
732 | * On read, you get a full request, or block. | 732 | * On read, you get a full request, or block. |
733 | * On write, an update request is processed. | 733 | * On write, an update request is processed. |
734 | * Poll works if anything to read, and always allows write. | 734 | * Poll works if anything to read, and always allows write. |
@@ -1283,7 +1283,7 @@ EXPORT_SYMBOL_GPL(qword_get); | |||
1283 | 1283 | ||
1284 | 1284 | ||
1285 | /* | 1285 | /* |
1286 | * support /proc/sunrpc/cache/$CACHENAME/content | 1286 | * support /proc/net/rpc/$CACHENAME/content |
1287 | * as a seqfile. | 1287 | * as a seqfile. |
1288 | * We call ->cache_show passing NULL for the item to | 1288 | * We call ->cache_show passing NULL for the item to |
1289 | * get a header, then pass each real item in the cache | 1289 | * get a header, then pass each real item in the cache |
@@ -1438,20 +1438,11 @@ static ssize_t read_flush(struct file *file, char __user *buf, | |||
1438 | struct cache_detail *cd) | 1438 | struct cache_detail *cd) |
1439 | { | 1439 | { |
1440 | char tbuf[22]; | 1440 | char tbuf[22]; |
1441 | unsigned long p = *ppos; | ||
1442 | size_t len; | 1441 | size_t len; |
1443 | 1442 | ||
1444 | snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time)); | 1443 | len = snprintf(tbuf, sizeof(tbuf), "%lu\n", |
1445 | len = strlen(tbuf); | 1444 | convert_to_wallclock(cd->flush_time)); |
1446 | if (p >= len) | 1445 | return simple_read_from_buffer(buf, count, ppos, tbuf, len); |
1447 | return 0; | ||
1448 | len -= p; | ||
1449 | if (len > count) | ||
1450 | len = count; | ||
1451 | if (copy_to_user(buf, (void*)(tbuf+p), len)) | ||
1452 | return -EFAULT; | ||
1453 | *ppos += len; | ||
1454 | return len; | ||
1455 | } | 1446 | } |
1456 | 1447 | ||
1457 | static ssize_t write_flush(struct file *file, const char __user *buf, | 1448 | static ssize_t write_flush(struct file *file, const char __user *buf, |
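The rewritten read_flush() is one instance of the series' "read from buffer" cleanup: format the value into a small stack buffer, then let simple_read_from_buffer() deal with *ppos, short reads and copy_to_user(). A hedged sketch of the same pattern for a generic read handler (the exported value and the names are illustrative, not part of this patch):

#include <linux/fs.h>
#include <linux/kernel.h>

/* Expose a single unsigned long as text, the way read_flush() now does. */
static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	char tbuf[22];
	size_t len;
	unsigned long value = 12345;	/* whatever state this file exports */

	len = snprintf(tbuf, sizeof(tbuf), "%lu\n", value);
	/* clamps to count, honours *ppos, performs the copy_to_user() */
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}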
@@ -1611,21 +1602,12 @@ static const struct file_operations cache_flush_operations_procfs = { | |||
1611 | .llseek = no_llseek, | 1602 | .llseek = no_llseek, |
1612 | }; | 1603 | }; |
1613 | 1604 | ||
1614 | static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net) | 1605 | static void remove_cache_proc_entries(struct cache_detail *cd) |
1615 | { | 1606 | { |
1616 | struct sunrpc_net *sn; | 1607 | if (cd->procfs) { |
1617 | 1608 | proc_remove(cd->procfs); | |
1618 | if (cd->u.procfs.proc_ent == NULL) | 1609 | cd->procfs = NULL; |
1619 | return; | 1610 | } |
1620 | if (cd->u.procfs.flush_ent) | ||
1621 | remove_proc_entry("flush", cd->u.procfs.proc_ent); | ||
1622 | if (cd->u.procfs.channel_ent) | ||
1623 | remove_proc_entry("channel", cd->u.procfs.proc_ent); | ||
1624 | if (cd->u.procfs.content_ent) | ||
1625 | remove_proc_entry("content", cd->u.procfs.proc_ent); | ||
1626 | cd->u.procfs.proc_ent = NULL; | ||
1627 | sn = net_generic(net, sunrpc_net_id); | ||
1628 | remove_proc_entry(cd->name, sn->proc_net_rpc); | ||
1629 | } | 1611 | } |
1630 | 1612 | ||
1631 | #ifdef CONFIG_PROC_FS | 1613 | #ifdef CONFIG_PROC_FS |
@@ -1635,38 +1617,30 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) | |||
1635 | struct sunrpc_net *sn; | 1617 | struct sunrpc_net *sn; |
1636 | 1618 | ||
1637 | sn = net_generic(net, sunrpc_net_id); | 1619 | sn = net_generic(net, sunrpc_net_id); |
1638 | cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc); | 1620 | cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); |
1639 | if (cd->u.procfs.proc_ent == NULL) | 1621 | if (cd->procfs == NULL) |
1640 | goto out_nomem; | 1622 | goto out_nomem; |
1641 | cd->u.procfs.channel_ent = NULL; | ||
1642 | cd->u.procfs.content_ent = NULL; | ||
1643 | 1623 | ||
1644 | p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR, | 1624 | p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR, |
1645 | cd->u.procfs.proc_ent, | 1625 | cd->procfs, &cache_flush_operations_procfs, cd); |
1646 | &cache_flush_operations_procfs, cd); | ||
1647 | cd->u.procfs.flush_ent = p; | ||
1648 | if (p == NULL) | 1626 | if (p == NULL) |
1649 | goto out_nomem; | 1627 | goto out_nomem; |
1650 | 1628 | ||
1651 | if (cd->cache_request || cd->cache_parse) { | 1629 | if (cd->cache_request || cd->cache_parse) { |
1652 | p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, | 1630 | p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, |
1653 | cd->u.procfs.proc_ent, | 1631 | cd->procfs, &cache_file_operations_procfs, cd); |
1654 | &cache_file_operations_procfs, cd); | ||
1655 | cd->u.procfs.channel_ent = p; | ||
1656 | if (p == NULL) | 1632 | if (p == NULL) |
1657 | goto out_nomem; | 1633 | goto out_nomem; |
1658 | } | 1634 | } |
1659 | if (cd->cache_show) { | 1635 | if (cd->cache_show) { |
1660 | p = proc_create_data("content", S_IFREG|S_IRUSR, | 1636 | p = proc_create_data("content", S_IFREG|S_IRUSR, |
1661 | cd->u.procfs.proc_ent, | 1637 | cd->procfs, &content_file_operations_procfs, cd); |
1662 | &content_file_operations_procfs, cd); | ||
1663 | cd->u.procfs.content_ent = p; | ||
1664 | if (p == NULL) | 1638 | if (p == NULL) |
1665 | goto out_nomem; | 1639 | goto out_nomem; |
1666 | } | 1640 | } |
1667 | return 0; | 1641 | return 0; |
1668 | out_nomem: | 1642 | out_nomem: |
1669 | remove_cache_proc_entries(cd, net); | 1643 | remove_cache_proc_entries(cd); |
1670 | return -ENOMEM; | 1644 | return -ENOMEM; |
1671 | } | 1645 | } |
1672 | #else /* CONFIG_PROC_FS */ | 1646 | #else /* CONFIG_PROC_FS */ |
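With flush/channel/content now created directly under the single cd->procfs directory, teardown no longer needs to name each child: proc_remove() on the parent tears down the whole subtree. A hedged sketch of that create/remove pairing in isolation (illustrative names, and only one child entry shown):

#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static struct proc_dir_entry *demo_dir;

static int demo_proc_create(const struct file_operations *fops, void *data)
{
	demo_dir = proc_mkdir("demo_cache", NULL);
	if (demo_dir == NULL)
		return -ENOMEM;
	if (proc_create_data("flush", S_IFREG | S_IRUSR | S_IWUSR,
			     demo_dir, fops, data) == NULL) {
		proc_remove(demo_dir);	/* removes the directory and its children */
		demo_dir = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void demo_proc_remove(void)
{
	if (demo_dir) {
		proc_remove(demo_dir);	/* recursive, no per-entry bookkeeping */
		demo_dir = NULL;
	}
}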
@@ -1695,7 +1669,7 @@ EXPORT_SYMBOL_GPL(cache_register_net); | |||
1695 | 1669 | ||
1696 | void cache_unregister_net(struct cache_detail *cd, struct net *net) | 1670 | void cache_unregister_net(struct cache_detail *cd, struct net *net) |
1697 | { | 1671 | { |
1698 | remove_cache_proc_entries(cd, net); | 1672 | remove_cache_proc_entries(cd); |
1699 | sunrpc_destroy_cache_detail(cd); | 1673 | sunrpc_destroy_cache_detail(cd); |
1700 | } | 1674 | } |
1701 | EXPORT_SYMBOL_GPL(cache_unregister_net); | 1675 | EXPORT_SYMBOL_GPL(cache_unregister_net); |
@@ -1854,15 +1828,17 @@ int sunrpc_cache_register_pipefs(struct dentry *parent, | |||
1854 | struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd); | 1828 | struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd); |
1855 | if (IS_ERR(dir)) | 1829 | if (IS_ERR(dir)) |
1856 | return PTR_ERR(dir); | 1830 | return PTR_ERR(dir); |
1857 | cd->u.pipefs.dir = dir; | 1831 | cd->pipefs = dir; |
1858 | return 0; | 1832 | return 0; |
1859 | } | 1833 | } |
1860 | EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); | 1834 | EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); |
1861 | 1835 | ||
1862 | void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) | 1836 | void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) |
1863 | { | 1837 | { |
1864 | rpc_remove_cache_dir(cd->u.pipefs.dir); | 1838 | if (cd->pipefs) { |
1865 | cd->u.pipefs.dir = NULL; | 1839 | rpc_remove_cache_dir(cd->pipefs); |
1840 | cd->pipefs = NULL; | ||
1841 | } | ||
1866 | } | 1842 | } |
1867 | EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); | 1843 | EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); |
1868 | 1844 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 1dc9f3bac099..52da3ce54bb5 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -1453,21 +1453,6 @@ size_t rpc_max_bc_payload(struct rpc_clnt *clnt) | |||
1453 | EXPORT_SYMBOL_GPL(rpc_max_bc_payload); | 1453 | EXPORT_SYMBOL_GPL(rpc_max_bc_payload); |
1454 | 1454 | ||
1455 | /** | 1455 | /** |
1456 | * rpc_get_timeout - Get timeout for transport in units of HZ | ||
1457 | * @clnt: RPC client to query | ||
1458 | */ | ||
1459 | unsigned long rpc_get_timeout(struct rpc_clnt *clnt) | ||
1460 | { | ||
1461 | unsigned long ret; | ||
1462 | |||
1463 | rcu_read_lock(); | ||
1464 | ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval; | ||
1465 | rcu_read_unlock(); | ||
1466 | return ret; | ||
1467 | } | ||
1468 | EXPORT_SYMBOL_GPL(rpc_get_timeout); | ||
1469 | |||
1470 | /** | ||
1471 | * rpc_force_rebind - force transport to check that remote port is unchanged | 1456 | * rpc_force_rebind - force transport to check that remote port is unchanged |
1472 | * @clnt: client to rebind | 1457 | * @clnt: client to rebind |
1473 | * | 1458 | * |
@@ -2699,6 +2684,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, | |||
2699 | { | 2684 | { |
2700 | struct rpc_xprt_switch *xps; | 2685 | struct rpc_xprt_switch *xps; |
2701 | struct rpc_xprt *xprt; | 2686 | struct rpc_xprt *xprt; |
2687 | unsigned long connect_timeout; | ||
2702 | unsigned long reconnect_timeout; | 2688 | unsigned long reconnect_timeout; |
2703 | unsigned char resvport; | 2689 | unsigned char resvport; |
2704 | int ret = 0; | 2690 | int ret = 0; |
@@ -2711,6 +2697,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, | |||
2711 | return -EAGAIN; | 2697 | return -EAGAIN; |
2712 | } | 2698 | } |
2713 | resvport = xprt->resvport; | 2699 | resvport = xprt->resvport; |
2700 | connect_timeout = xprt->connect_timeout; | ||
2714 | reconnect_timeout = xprt->max_reconnect_timeout; | 2701 | reconnect_timeout = xprt->max_reconnect_timeout; |
2715 | rcu_read_unlock(); | 2702 | rcu_read_unlock(); |
2716 | 2703 | ||
@@ -2720,7 +2707,10 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, | |||
2720 | goto out_put_switch; | 2707 | goto out_put_switch; |
2721 | } | 2708 | } |
2722 | xprt->resvport = resvport; | 2709 | xprt->resvport = resvport; |
2723 | xprt->max_reconnect_timeout = reconnect_timeout; | 2710 | if (xprt->ops->set_connect_timeout != NULL) |
2711 | xprt->ops->set_connect_timeout(xprt, | ||
2712 | connect_timeout, | ||
2713 | reconnect_timeout); | ||
2724 | 2714 | ||
2725 | rpc_xprt_switch_set_roundrobin(xps); | 2715 | rpc_xprt_switch_set_roundrobin(xps); |
2726 | if (setup) { | 2716 | if (setup) { |
@@ -2737,26 +2727,39 @@ out_put_switch: | |||
2737 | } | 2727 | } |
2738 | EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); | 2728 | EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); |
2739 | 2729 | ||
2730 | struct connect_timeout_data { | ||
2731 | unsigned long connect_timeout; | ||
2732 | unsigned long reconnect_timeout; | ||
2733 | }; | ||
2734 | |||
2740 | static int | 2735 | static int |
2741 | rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt, | 2736 | rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, |
2742 | struct rpc_xprt *xprt, | 2737 | struct rpc_xprt *xprt, |
2743 | void *data) | 2738 | void *data) |
2744 | { | 2739 | { |
2745 | unsigned long timeout = *((unsigned long *)data); | 2740 | struct connect_timeout_data *timeo = data; |
2746 | 2741 | ||
2747 | if (timeout < xprt->max_reconnect_timeout) | 2742 | if (xprt->ops->set_connect_timeout) |
2748 | xprt->max_reconnect_timeout = timeout; | 2743 | xprt->ops->set_connect_timeout(xprt, |
2744 | timeo->connect_timeout, | ||
2745 | timeo->reconnect_timeout); | ||
2749 | return 0; | 2746 | return 0; |
2750 | } | 2747 | } |
2751 | 2748 | ||
2752 | void | 2749 | void |
2753 | rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo) | 2750 | rpc_set_connect_timeout(struct rpc_clnt *clnt, |
2751 | unsigned long connect_timeout, | ||
2752 | unsigned long reconnect_timeout) | ||
2754 | { | 2753 | { |
2754 | struct connect_timeout_data timeout = { | ||
2755 | .connect_timeout = connect_timeout, | ||
2756 | .reconnect_timeout = reconnect_timeout, | ||
2757 | }; | ||
2755 | rpc_clnt_iterate_for_each_xprt(clnt, | 2758 | rpc_clnt_iterate_for_each_xprt(clnt, |
2756 | rpc_xprt_cap_max_reconnect_timeout, | 2759 | rpc_xprt_set_connect_timeout, |
2757 | &timeo); | 2760 | &timeout); |
2758 | } | 2761 | } |
2759 | EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); | 2762 | EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); |
2760 | 2763 | ||
2761 | void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) | 2764 | void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) |
2762 | { | 2765 | { |
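Callers that previously capped only the reconnect backoff with rpc_cap_max_reconnect_timeout() now hand both values to rpc_set_connect_timeout(), and each transport applies them through its ->set_connect_timeout() op (transports without the op are simply skipped). A hedged usage sketch, assuming a valid struct rpc_clnt from rpc_create() and that the declaration lands in linux/sunrpc/clnt.h alongside the export above; the values are illustrative:

#include <linux/sunrpc/clnt.h>

/* Ask every transport of this client for a 90 second connect timeout and
 * a reconnect backoff capped at 15 seconds (both in jiffies). */
static void demo_tune_timeouts(struct rpc_clnt *clnt)
{
	rpc_set_connect_timeout(clnt, 90 * HZ, 15 * HZ);
}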
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index e7b4d93566df..c8fd0b6c1618 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
@@ -16,11 +16,6 @@ static struct dentry *rpc_xprt_dir; | |||
16 | 16 | ||
17 | unsigned int rpc_inject_disconnect; | 17 | unsigned int rpc_inject_disconnect; |
18 | 18 | ||
19 | struct rpc_clnt_iter { | ||
20 | struct rpc_clnt *clnt; | ||
21 | loff_t pos; | ||
22 | }; | ||
23 | |||
24 | static int | 19 | static int |
25 | tasks_show(struct seq_file *f, void *v) | 20 | tasks_show(struct seq_file *f, void *v) |
26 | { | 21 | { |
@@ -47,12 +42,10 @@ static void * | |||
47 | tasks_start(struct seq_file *f, loff_t *ppos) | 42 | tasks_start(struct seq_file *f, loff_t *ppos) |
48 | __acquires(&clnt->cl_lock) | 43 | __acquires(&clnt->cl_lock) |
49 | { | 44 | { |
50 | struct rpc_clnt_iter *iter = f->private; | 45 | struct rpc_clnt *clnt = f->private; |
51 | loff_t pos = *ppos; | 46 | loff_t pos = *ppos; |
52 | struct rpc_clnt *clnt = iter->clnt; | ||
53 | struct rpc_task *task; | 47 | struct rpc_task *task; |
54 | 48 | ||
55 | iter->pos = pos + 1; | ||
56 | spin_lock(&clnt->cl_lock); | 49 | spin_lock(&clnt->cl_lock); |
57 | list_for_each_entry(task, &clnt->cl_tasks, tk_task) | 50 | list_for_each_entry(task, &clnt->cl_tasks, tk_task) |
58 | if (pos-- == 0) | 51 | if (pos-- == 0) |
@@ -63,12 +56,10 @@ tasks_start(struct seq_file *f, loff_t *ppos) | |||
63 | static void * | 56 | static void * |
64 | tasks_next(struct seq_file *f, void *v, loff_t *pos) | 57 | tasks_next(struct seq_file *f, void *v, loff_t *pos) |
65 | { | 58 | { |
66 | struct rpc_clnt_iter *iter = f->private; | 59 | struct rpc_clnt *clnt = f->private; |
67 | struct rpc_clnt *clnt = iter->clnt; | ||
68 | struct rpc_task *task = v; | 60 | struct rpc_task *task = v; |
69 | struct list_head *next = task->tk_task.next; | 61 | struct list_head *next = task->tk_task.next; |
70 | 62 | ||
71 | ++iter->pos; | ||
72 | ++*pos; | 63 | ++*pos; |
73 | 64 | ||
74 | /* If there's another task on list, return it */ | 65 | /* If there's another task on list, return it */ |
@@ -81,9 +72,7 @@ static void | |||
81 | tasks_stop(struct seq_file *f, void *v) | 72 | tasks_stop(struct seq_file *f, void *v) |
82 | __releases(&clnt->cl_lock) | 73 | __releases(&clnt->cl_lock) |
83 | { | 74 | { |
84 | struct rpc_clnt_iter *iter = f->private; | 75 | struct rpc_clnt *clnt = f->private; |
85 | struct rpc_clnt *clnt = iter->clnt; | ||
86 | |||
87 | spin_unlock(&clnt->cl_lock); | 76 | spin_unlock(&clnt->cl_lock); |
88 | } | 77 | } |
89 | 78 | ||
@@ -96,17 +85,13 @@ static const struct seq_operations tasks_seq_operations = { | |||
96 | 85 | ||
97 | static int tasks_open(struct inode *inode, struct file *filp) | 86 | static int tasks_open(struct inode *inode, struct file *filp) |
98 | { | 87 | { |
99 | int ret = seq_open_private(filp, &tasks_seq_operations, | 88 | int ret = seq_open(filp, &tasks_seq_operations); |
100 | sizeof(struct rpc_clnt_iter)); | ||
101 | |||
102 | if (!ret) { | 89 | if (!ret) { |
103 | struct seq_file *seq = filp->private_data; | 90 | struct seq_file *seq = filp->private_data; |
104 | struct rpc_clnt_iter *iter = seq->private; | 91 | struct rpc_clnt *clnt = seq->private = inode->i_private; |
105 | |||
106 | iter->clnt = inode->i_private; | ||
107 | 92 | ||
108 | if (!atomic_inc_not_zero(&iter->clnt->cl_count)) { | 93 | if (!atomic_inc_not_zero(&clnt->cl_count)) { |
109 | seq_release_private(inode, filp); | 94 | seq_release(inode, filp); |
110 | ret = -EINVAL; | 95 | ret = -EINVAL; |
111 | } | 96 | } |
112 | } | 97 | } |
@@ -118,10 +103,10 @@ static int | |||
118 | tasks_release(struct inode *inode, struct file *filp) | 103 | tasks_release(struct inode *inode, struct file *filp) |
119 | { | 104 | { |
120 | struct seq_file *seq = filp->private_data; | 105 | struct seq_file *seq = filp->private_data; |
121 | struct rpc_clnt_iter *iter = seq->private; | 106 | struct rpc_clnt *clnt = seq->private; |
122 | 107 | ||
123 | rpc_release_client(iter->clnt); | 108 | rpc_release_client(clnt); |
124 | return seq_release_private(inode, filp); | 109 | return seq_release(inode, filp); |
125 | } | 110 | } |
126 | 111 | ||
127 | static const struct file_operations tasks_fops = { | 112 | static const struct file_operations tasks_fops = { |
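Dropping rpc_clnt_iter leaves seq->private holding the client pointer directly, the usual idiom when the only per-open state is a single pointer passed in via inode->i_private. A hedged sketch of that open/release pairing in isolation (illustrative names; the reference counting tasks_open() does on cl_count is represented only by the comments):

#include <linux/fs.h>
#include <linux/seq_file.h>

extern const struct seq_operations demo_seq_ops;	/* start/next/stop/show */

static int demo_open(struct inode *inode, struct file *filp)
{
	int ret = seq_open(filp, &demo_seq_ops);

	if (!ret) {
		struct seq_file *seq = filp->private_data;

		/* stash the object this file was created for; take a
		 * reference here if it can go away underneath us */
		seq->private = inode->i_private;
	}
	return ret;
}

static int demo_release(struct inode *inode, struct file *filp)
{
	/* drop the reference taken in demo_open(), then release the seq_file */
	return seq_release(inode, filp);
}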
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 64af4f034de6..f81eaa8e0888 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -403,7 +403,7 @@ svcauth_unix_info_release(struct svc_xprt *xpt) | |||
403 | /**************************************************************************** | 403 | /**************************************************************************** |
404 | * auth.unix.gid cache | 404 | * auth.unix.gid cache |
405 | * simple cache to map a UID to a list of GIDs | 405 | * simple cache to map a UID to a list of GIDs |
406 | * because AUTH_UNIX aka AUTH_SYS has a max of 16 | 406 | * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS |
407 | */ | 407 | */ |
408 | #define GID_HASHBITS 8 | 408 | #define GID_HASHBITS 8 |
409 | #define GID_HASHMAX (1<<GID_HASHBITS) | 409 | #define GID_HASHMAX (1<<GID_HASHBITS) |
@@ -810,7 +810,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
810 | cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ | 810 | cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ |
811 | cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ | 811 | cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ |
812 | slen = svc_getnl(argv); /* gids length */ | 812 | slen = svc_getnl(argv); /* gids length */ |
813 | if (slen > 16 || (len -= (slen + 2)*4) < 0) | 813 | if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0) |
814 | goto badcred; | 814 | goto badcred; |
815 | cred->cr_group_info = groups_alloc(slen); | 815 | cred->cr_group_info = groups_alloc(slen); |
816 | if (cred->cr_group_info == NULL) | 816 | if (cred->cr_group_info == NULL) |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 7f1071e103ca..1f7082144e01 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -1518,3 +1518,37 @@ out: | |||
1518 | } | 1518 | } |
1519 | EXPORT_SYMBOL_GPL(xdr_process_buf); | 1519 | EXPORT_SYMBOL_GPL(xdr_process_buf); |
1520 | 1520 | ||
1521 | /** | ||
1522 | * xdr_stream_decode_string_dup - Decode and duplicate variable length string | ||
1523 | * @xdr: pointer to xdr_stream | ||
1524 | * @str: location to store pointer to string | ||
1525 | * @maxlen: maximum acceptable string length | ||
1526 | * @gfp_flags: GFP mask to use | ||
1527 | * | ||
1528 | * Return values: | ||
1529 | * On success, returns length of NUL-terminated string stored in *@str | ||
1530 | * %-EBADMSG on XDR buffer overflow | ||
1531 | * %-EMSGSIZE if the size of the string would exceed @maxlen | ||
1532 | * %-ENOMEM on memory allocation failure | ||
1533 | */ | ||
1534 | ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, | ||
1535 | size_t maxlen, gfp_t gfp_flags) | ||
1536 | { | ||
1537 | void *p; | ||
1538 | ssize_t ret; | ||
1539 | |||
1540 | ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen); | ||
1541 | if (ret > 0) { | ||
1542 | char *s = kmalloc(ret + 1, gfp_flags); | ||
1543 | if (s != NULL) { | ||
1544 | memcpy(s, p, ret); | ||
1545 | s[ret] = '\0'; | ||
1546 | *str = s; | ||
1547 | return strlen(s); | ||
1548 | } | ||
1549 | ret = -ENOMEM; | ||
1550 | } | ||
1551 | *str = NULL; | ||
1552 | return ret; | ||
1553 | } | ||
1554 | EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup); | ||
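A hedged sketch of how a decoder might use the new helper, for example to pull an owner string out of a reply into a buffer the caller later kfree()s; the wrapper name and the 1024-byte cap are illustrative, and the prototype is assumed to be exported from linux/sunrpc/xdr.h:

#include <linux/gfp.h>
#include <linux/sunrpc/xdr.h>

/* Decode one counted string of at most 1024 bytes into a freshly
 * allocated, NUL-terminated buffer stored in *owner. */
static int demo_decode_owner(struct xdr_stream *xdr, char **owner)
{
	ssize_t len;

	len = xdr_stream_decode_string_dup(xdr, owner, 1024, GFP_NOIO);
	if (len < 0)
		return (int)len;	/* -EBADMSG, -EMSGSIZE or -ENOMEM */
	return 0;
}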
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 9a6be030ca7d..b530a2852ba8 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -897,13 +897,11 @@ static void xprt_timer(struct rpc_task *task) | |||
897 | return; | 897 | return; |
898 | dprintk("RPC: %5u xprt_timer\n", task->tk_pid); | 898 | dprintk("RPC: %5u xprt_timer\n", task->tk_pid); |
899 | 899 | ||
900 | spin_lock_bh(&xprt->transport_lock); | ||
901 | if (!req->rq_reply_bytes_recvd) { | 900 | if (!req->rq_reply_bytes_recvd) { |
902 | if (xprt->ops->timer) | 901 | if (xprt->ops->timer) |
903 | xprt->ops->timer(xprt, task); | 902 | xprt->ops->timer(xprt, task); |
904 | } else | 903 | } else |
905 | task->tk_status = 0; | 904 | task->tk_status = 0; |
906 | spin_unlock_bh(&xprt->transport_lock); | ||
907 | } | 905 | } |
908 | 906 | ||
909 | /** | 907 | /** |
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index 1ebb09e1ac4f..59e64025ed96 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -310,10 +310,7 @@ fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, | |||
310 | struct rpcrdma_mw *mw; | 310 | struct rpcrdma_mw *mw; |
311 | 311 | ||
312 | while (!list_empty(&req->rl_registered)) { | 312 | while (!list_empty(&req->rl_registered)) { |
313 | mw = list_first_entry(&req->rl_registered, | 313 | mw = rpcrdma_pop_mw(&req->rl_registered); |
314 | struct rpcrdma_mw, mw_list); | ||
315 | list_del_init(&mw->mw_list); | ||
316 | |||
317 | if (sync) | 314 | if (sync) |
318 | fmr_op_recover_mr(mw); | 315 | fmr_op_recover_mr(mw); |
319 | else | 316 | else |
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 47bed5333c7f..f81dd93176c0 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -466,8 +466,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |||
466 | struct ib_send_wr *first, **prev, *last, *bad_wr; | 466 | struct ib_send_wr *first, **prev, *last, *bad_wr; |
467 | struct rpcrdma_rep *rep = req->rl_reply; | 467 | struct rpcrdma_rep *rep = req->rl_reply; |
468 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | 468 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
469 | struct rpcrdma_mw *mw, *tmp; | ||
470 | struct rpcrdma_frmr *f; | 469 | struct rpcrdma_frmr *f; |
470 | struct rpcrdma_mw *mw; | ||
471 | int count, rc; | 471 | int count, rc; |
472 | 472 | ||
473 | dprintk("RPC: %s: req %p\n", __func__, req); | 473 | dprintk("RPC: %s: req %p\n", __func__, req); |
@@ -534,10 +534,10 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |||
534 | * them to the free MW list. | 534 | * them to the free MW list. |
535 | */ | 535 | */ |
536 | unmap: | 536 | unmap: |
537 | list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) { | 537 | while (!list_empty(&req->rl_registered)) { |
538 | mw = rpcrdma_pop_mw(&req->rl_registered); | ||
538 | dprintk("RPC: %s: DMA unmapping frmr %p\n", | 539 | dprintk("RPC: %s: DMA unmapping frmr %p\n", |
539 | __func__, &mw->frmr); | 540 | __func__, &mw->frmr); |
540 | list_del_init(&mw->mw_list); | ||
541 | ib_dma_unmap_sg(ia->ri_device, | 541 | ib_dma_unmap_sg(ia->ri_device, |
542 | mw->mw_sg, mw->mw_nents, mw->mw_dir); | 542 | mw->mw_sg, mw->mw_nents, mw->mw_dir); |
543 | rpcrdma_put_mw(r_xprt, mw); | 543 | rpcrdma_put_mw(r_xprt, mw); |
@@ -571,10 +571,7 @@ frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, | |||
571 | struct rpcrdma_mw *mw; | 571 | struct rpcrdma_mw *mw; |
572 | 572 | ||
573 | while (!list_empty(&req->rl_registered)) { | 573 | while (!list_empty(&req->rl_registered)) { |
574 | mw = list_first_entry(&req->rl_registered, | 574 | mw = rpcrdma_pop_mw(&req->rl_registered); |
575 | struct rpcrdma_mw, mw_list); | ||
576 | list_del_init(&mw->mw_list); | ||
577 | |||
578 | if (sync) | 575 | if (sync) |
579 | frwr_op_recover_mr(mw); | 576 | frwr_op_recover_mr(mw); |
580 | else | 577 | else |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index c52e0f2ffe52..a044be2d6ad7 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -125,14 +125,34 @@ void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt) | |||
125 | /* The client can send a request inline as long as the RPCRDMA header | 125 | /* The client can send a request inline as long as the RPCRDMA header |
126 | * plus the RPC call fit under the transport's inline limit. If the | 126 | * plus the RPC call fit under the transport's inline limit. If the |
127 | * combined call message size exceeds that limit, the client must use | 127 | * combined call message size exceeds that limit, the client must use |
128 | * the read chunk list for this operation. | 128 | * a Read chunk for this operation. |
129 | * | ||
130 | * A Read chunk is also required if sending the RPC call inline would | ||
131 | * exceed this device's max_sge limit. | ||
129 | */ | 132 | */ |
130 | static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, | 133 | static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, |
131 | struct rpc_rqst *rqst) | 134 | struct rpc_rqst *rqst) |
132 | { | 135 | { |
133 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | 136 | struct xdr_buf *xdr = &rqst->rq_snd_buf; |
137 | unsigned int count, remaining, offset; | ||
138 | |||
139 | if (xdr->len > r_xprt->rx_ia.ri_max_inline_write) | ||
140 | return false; | ||
141 | |||
142 | if (xdr->page_len) { | ||
143 | remaining = xdr->page_len; | ||
144 | offset = xdr->page_base & ~PAGE_MASK; | ||
145 | count = 0; | ||
146 | while (remaining) { | ||
147 | remaining -= min_t(unsigned int, | ||
148 | PAGE_SIZE - offset, remaining); | ||
149 | offset = 0; | ||
150 | if (++count > r_xprt->rx_ia.ri_max_send_sges) | ||
151 | return false; | ||
152 | } | ||
153 | } | ||
134 | 154 | ||
135 | return rqst->rq_snd_buf.len <= ia->ri_max_inline_write; | 155 | return true; |
136 | } | 156 | } |
137 | 157 | ||
138 | /* The client can't know how large the actual reply will be. Thus it | 158 | /* The client can't know how large the actual reply will be. Thus it |
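The new rpcrdma_args_inline() walks the page portion of the send buffer and counts the SGEs it would consume, returning false once that exceeds ri_max_send_sges. The counting itself is plain page arithmetic; a stand-alone user-space rendering of the loop (PAGE_SIZE and the sample lengths are assumed for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* How many page-sized SGEs does a range of page_len bytes need when it
 * starts page_base bytes into its first page? Mirrors the loop above. */
static unsigned int count_page_sges(unsigned int page_base, unsigned int page_len)
{
	unsigned int remaining = page_len;
	unsigned int offset = page_base & ~PAGE_MASK;
	unsigned int count = 0;

	while (remaining) {
		unsigned int chunk = PAGE_SIZE - offset;

		if (chunk > remaining)
			chunk = remaining;
		remaining -= chunk;
		offset = 0;
		count++;
	}
	return count;
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into the first page span 3 pages */
	printf("%u SGEs\n", count_page_sges(100, 10000));
	return 0;
}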
@@ -186,9 +206,9 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n) | |||
186 | */ | 206 | */ |
187 | 207 | ||
188 | static int | 208 | static int |
189 | rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, | 209 | rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, |
190 | enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, | 210 | unsigned int pos, enum rpcrdma_chunktype type, |
191 | bool reminv_expected) | 211 | struct rpcrdma_mr_seg *seg) |
192 | { | 212 | { |
193 | int len, n, p, page_base; | 213 | int len, n, p, page_base; |
194 | struct page **ppages; | 214 | struct page **ppages; |
@@ -226,22 +246,21 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, | |||
226 | if (len && n == RPCRDMA_MAX_SEGS) | 246 | if (len && n == RPCRDMA_MAX_SEGS) |
227 | goto out_overflow; | 247 | goto out_overflow; |
228 | 248 | ||
229 | /* When encoding the read list, the tail is always sent inline */ | 249 | /* When encoding a Read chunk, the tail iovec contains an |
230 | if (type == rpcrdma_readch) | 250 | * XDR pad and may be omitted. |
251 | */ | ||
252 | if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup) | ||
231 | return n; | 253 | return n; |
232 | 254 | ||
233 | /* When encoding the Write list, some servers need to see an extra | 255 | /* When encoding a Write chunk, some servers need to see an |
234 | * segment for odd-length Write chunks. The upper layer provides | 256 | * extra segment for non-XDR-aligned Write chunks. The upper |
235 | * space in the tail iovec for this purpose. | 257 | * layer provides space in the tail iovec that may be used |
258 | * for this purpose. | ||
236 | */ | 259 | */ |
237 | if (type == rpcrdma_writech && reminv_expected) | 260 | if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup) |
238 | return n; | 261 | return n; |
239 | 262 | ||
240 | if (xdrbuf->tail[0].iov_len) { | 263 | if (xdrbuf->tail[0].iov_len) { |
241 | /* the rpcrdma protocol allows us to omit any trailing | ||
242 | * xdr pad bytes, saving the server an RDMA operation. */ | ||
243 | if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) | ||
244 | return n; | ||
245 | n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n); | 264 | n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n); |
246 | if (n == RPCRDMA_MAX_SEGS) | 265 | if (n == RPCRDMA_MAX_SEGS) |
247 | goto out_overflow; | 266 | goto out_overflow; |
@@ -293,7 +312,8 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, | |||
293 | if (rtype == rpcrdma_areadch) | 312 | if (rtype == rpcrdma_areadch) |
294 | pos = 0; | 313 | pos = 0; |
295 | seg = req->rl_segments; | 314 | seg = req->rl_segments; |
296 | nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false); | 315 | nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, |
316 | rtype, seg); | ||
297 | if (nsegs < 0) | 317 | if (nsegs < 0) |
298 | return ERR_PTR(nsegs); | 318 | return ERR_PTR(nsegs); |
299 | 319 | ||
@@ -302,7 +322,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, | |||
302 | false, &mw); | 322 | false, &mw); |
303 | if (n < 0) | 323 | if (n < 0) |
304 | return ERR_PTR(n); | 324 | return ERR_PTR(n); |
305 | list_add(&mw->mw_list, &req->rl_registered); | 325 | rpcrdma_push_mw(mw, &req->rl_registered); |
306 | 326 | ||
307 | *iptr++ = xdr_one; /* item present */ | 327 | *iptr++ = xdr_one; /* item present */ |
308 | 328 | ||
@@ -355,10 +375,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, | |||
355 | } | 375 | } |
356 | 376 | ||
357 | seg = req->rl_segments; | 377 | seg = req->rl_segments; |
358 | nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, | 378 | nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, |
359 | rqst->rq_rcv_buf.head[0].iov_len, | 379 | rqst->rq_rcv_buf.head[0].iov_len, |
360 | wtype, seg, | 380 | wtype, seg); |
361 | r_xprt->rx_ia.ri_reminv_expected); | ||
362 | if (nsegs < 0) | 381 | if (nsegs < 0) |
363 | return ERR_PTR(nsegs); | 382 | return ERR_PTR(nsegs); |
364 | 383 | ||
@@ -371,7 +390,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, | |||
371 | true, &mw); | 390 | true, &mw); |
372 | if (n < 0) | 391 | if (n < 0) |
373 | return ERR_PTR(n); | 392 | return ERR_PTR(n); |
374 | list_add(&mw->mw_list, &req->rl_registered); | 393 | rpcrdma_push_mw(mw, &req->rl_registered); |
375 | 394 | ||
376 | iptr = xdr_encode_rdma_segment(iptr, mw); | 395 | iptr = xdr_encode_rdma_segment(iptr, mw); |
377 | 396 | ||
@@ -423,8 +442,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, | |||
423 | } | 442 | } |
424 | 443 | ||
425 | seg = req->rl_segments; | 444 | seg = req->rl_segments; |
426 | nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg, | 445 | nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); |
427 | r_xprt->rx_ia.ri_reminv_expected); | ||
428 | if (nsegs < 0) | 446 | if (nsegs < 0) |
429 | return ERR_PTR(nsegs); | 447 | return ERR_PTR(nsegs); |
430 | 448 | ||
@@ -437,7 +455,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, | |||
437 | true, &mw); | 455 | true, &mw); |
438 | if (n < 0) | 456 | if (n < 0) |
439 | return ERR_PTR(n); | 457 | return ERR_PTR(n); |
440 | list_add(&mw->mw_list, &req->rl_registered); | 458 | rpcrdma_push_mw(mw, &req->rl_registered); |
441 | 459 | ||
442 | iptr = xdr_encode_rdma_segment(iptr, mw); | 460 | iptr = xdr_encode_rdma_segment(iptr, mw); |
443 | 461 | ||
@@ -741,13 +759,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
741 | iptr = headerp->rm_body.rm_chunks; | 759 | iptr = headerp->rm_body.rm_chunks; |
742 | iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype); | 760 | iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype); |
743 | if (IS_ERR(iptr)) | 761 | if (IS_ERR(iptr)) |
744 | goto out_unmap; | 762 | goto out_err; |
745 | iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype); | 763 | iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype); |
746 | if (IS_ERR(iptr)) | 764 | if (IS_ERR(iptr)) |
747 | goto out_unmap; | 765 | goto out_err; |
748 | iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype); | 766 | iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype); |
749 | if (IS_ERR(iptr)) | 767 | if (IS_ERR(iptr)) |
750 | goto out_unmap; | 768 | goto out_err; |
751 | hdrlen = (unsigned char *)iptr - (unsigned char *)headerp; | 769 | hdrlen = (unsigned char *)iptr - (unsigned char *)headerp; |
752 | 770 | ||
753 | dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n", | 771 | dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n", |
@@ -758,12 +776,14 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
758 | if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen, | 776 | if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen, |
759 | &rqst->rq_snd_buf, rtype)) { | 777 | &rqst->rq_snd_buf, rtype)) { |
760 | iptr = ERR_PTR(-EIO); | 778 | iptr = ERR_PTR(-EIO); |
761 | goto out_unmap; | 779 | goto out_err; |
762 | } | 780 | } |
763 | return 0; | 781 | return 0; |
764 | 782 | ||
765 | out_unmap: | 783 | out_err: |
766 | r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false); | 784 | pr_err("rpcrdma: rpcrdma_marshal_req failed, status %ld\n", |
785 | PTR_ERR(iptr)); | ||
786 | r_xprt->rx_stats.failed_marshal_count++; | ||
767 | return PTR_ERR(iptr); | 787 | return PTR_ERR(iptr); |
768 | } | 788 | } |
769 | 789 | ||
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 534c178d2a7e..c717f5410776 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -67,7 +67,7 @@ unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; | |||
67 | static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; | 67 | static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; |
68 | static unsigned int xprt_rdma_inline_write_padding; | 68 | static unsigned int xprt_rdma_inline_write_padding; |
69 | static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; | 69 | static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; |
70 | int xprt_rdma_pad_optimize = 1; | 70 | int xprt_rdma_pad_optimize = 0; |
71 | 71 | ||
72 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 72 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
73 | 73 | ||
@@ -709,10 +709,6 @@ xprt_rdma_send_request(struct rpc_task *task) | |||
709 | return 0; | 709 | return 0; |
710 | 710 | ||
711 | failed_marshal: | 711 | failed_marshal: |
712 | dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n", | ||
713 | __func__, rc); | ||
714 | if (rc == -EIO) | ||
715 | r_xprt->rx_stats.failed_marshal_count++; | ||
716 | if (rc != -ENOTCONN) | 712 | if (rc != -ENOTCONN) |
717 | return rc; | 713 | return rc; |
718 | drop_connection: | 714 | drop_connection: |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 11d07748f699..81cd31acf690 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/sunrpc/svc_rdma.h> | 54 | #include <linux/sunrpc/svc_rdma.h> |
55 | #include <asm/bitops.h> | 55 | #include <asm/bitops.h> |
56 | #include <linux/module.h> /* try_module_get()/module_put() */ | 56 | #include <linux/module.h> /* try_module_get()/module_put() */ |
57 | #include <rdma/ib_cm.h> | ||
57 | 58 | ||
58 | #include "xprt_rdma.h" | 59 | #include "xprt_rdma.h" |
59 | 60 | ||
@@ -208,6 +209,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, | |||
208 | 209 | ||
209 | /* Default settings for RPC-over-RDMA Version One */ | 210 | /* Default settings for RPC-over-RDMA Version One */ |
210 | r_xprt->rx_ia.ri_reminv_expected = false; | 211 | r_xprt->rx_ia.ri_reminv_expected = false; |
212 | r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize; | ||
211 | rsize = RPCRDMA_V1_DEF_INLINE_SIZE; | 213 | rsize = RPCRDMA_V1_DEF_INLINE_SIZE; |
212 | wsize = RPCRDMA_V1_DEF_INLINE_SIZE; | 214 | wsize = RPCRDMA_V1_DEF_INLINE_SIZE; |
213 | 215 | ||
@@ -215,6 +217,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, | |||
215 | pmsg->cp_magic == rpcrdma_cmp_magic && | 217 | pmsg->cp_magic == rpcrdma_cmp_magic && |
216 | pmsg->cp_version == RPCRDMA_CMP_VERSION) { | 218 | pmsg->cp_version == RPCRDMA_CMP_VERSION) { |
217 | r_xprt->rx_ia.ri_reminv_expected = true; | 219 | r_xprt->rx_ia.ri_reminv_expected = true; |
220 | r_xprt->rx_ia.ri_implicit_roundup = true; | ||
218 | rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); | 221 | rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); |
219 | wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); | 222 | wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); |
220 | } | 223 | } |
@@ -277,7 +280,14 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) | |||
277 | connstate = -ENETDOWN; | 280 | connstate = -ENETDOWN; |
278 | goto connected; | 281 | goto connected; |
279 | case RDMA_CM_EVENT_REJECTED: | 282 | case RDMA_CM_EVENT_REJECTED: |
283 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
284 | pr_info("rpcrdma: connection to %pIS:%u on %s rejected: %s\n", | ||
285 | sap, rpc_get_port(sap), ia->ri_device->name, | ||
286 | rdma_reject_msg(id, event->status)); | ||
287 | #endif | ||
280 | connstate = -ECONNREFUSED; | 288 | connstate = -ECONNREFUSED; |
289 | if (event->status == IB_CM_REJ_STALE_CONN) | ||
290 | connstate = -EAGAIN; | ||
281 | goto connected; | 291 | goto connected; |
282 | case RDMA_CM_EVENT_DISCONNECTED: | 292 | case RDMA_CM_EVENT_DISCONNECTED: |
283 | connstate = -ECONNABORTED; | 293 | connstate = -ECONNABORTED; |
@@ -486,18 +496,19 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia) | |||
486 | */ | 496 | */ |
487 | int | 497 | int |
488 | rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | 498 | rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, |
489 | struct rpcrdma_create_data_internal *cdata) | 499 | struct rpcrdma_create_data_internal *cdata) |
490 | { | 500 | { |
491 | struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; | 501 | struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; |
502 | unsigned int max_qp_wr, max_sge; | ||
492 | struct ib_cq *sendcq, *recvcq; | 503 | struct ib_cq *sendcq, *recvcq; |
493 | unsigned int max_qp_wr; | ||
494 | int rc; | 504 | int rc; |
495 | 505 | ||
496 | if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) { | 506 | max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); |
497 | dprintk("RPC: %s: insufficient sge's available\n", | 507 | if (max_sge < RPCRDMA_MIN_SEND_SGES) { |
498 | __func__); | 508 | pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); |
499 | return -ENOMEM; | 509 | return -ENOMEM; |
500 | } | 510 | } |
511 | ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; | ||
501 | 512 | ||
502 | if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { | 513 | if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { |
503 | dprintk("RPC: %s: insufficient wqe's available\n", | 514 | dprintk("RPC: %s: insufficient wqe's available\n", |
@@ -522,7 +533,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
522 | ep->rep_attr.cap.max_recv_wr = cdata->max_requests; | 533 | ep->rep_attr.cap.max_recv_wr = cdata->max_requests; |
523 | ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; | 534 | ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; |
524 | ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */ | 535 | ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */ |
525 | ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES; | 536 | ep->rep_attr.cap.max_send_sge = max_sge; |
526 | ep->rep_attr.cap.max_recv_sge = 1; | 537 | ep->rep_attr.cap.max_recv_sge = 1; |
527 | ep->rep_attr.cap.max_inline_data = 0; | 538 | ep->rep_attr.cap.max_inline_data = 0; |
528 | ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; | 539 | ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; |
@@ -640,20 +651,21 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) | |||
640 | int | 651 | int |
641 | rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) | 652 | rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) |
642 | { | 653 | { |
654 | struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt, | ||
655 | rx_ia); | ||
643 | struct rdma_cm_id *id, *old; | 656 | struct rdma_cm_id *id, *old; |
657 | struct sockaddr *sap; | ||
658 | unsigned int extras; | ||
644 | int rc = 0; | 659 | int rc = 0; |
645 | int retry_count = 0; | ||
646 | 660 | ||
647 | if (ep->rep_connected != 0) { | 661 | if (ep->rep_connected != 0) { |
648 | struct rpcrdma_xprt *xprt; | ||
649 | retry: | 662 | retry: |
650 | dprintk("RPC: %s: reconnecting...\n", __func__); | 663 | dprintk("RPC: %s: reconnecting...\n", __func__); |
651 | 664 | ||
652 | rpcrdma_ep_disconnect(ep, ia); | 665 | rpcrdma_ep_disconnect(ep, ia); |
653 | 666 | ||
654 | xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); | 667 | sap = (struct sockaddr *)&r_xprt->rx_data.addr; |
655 | id = rpcrdma_create_id(xprt, ia, | 668 | id = rpcrdma_create_id(r_xprt, ia, sap); |
656 | (struct sockaddr *)&xprt->rx_data.addr); | ||
657 | if (IS_ERR(id)) { | 669 | if (IS_ERR(id)) { |
658 | rc = -EHOSTUNREACH; | 670 | rc = -EHOSTUNREACH; |
659 | goto out; | 671 | goto out; |
@@ -708,51 +720,18 @@ retry: | |||
708 | } | 720 | } |
709 | 721 | ||
710 | wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); | 722 | wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); |
711 | |||
712 | /* | ||
713 | * Check state. A non-peer reject indicates no listener | ||
714 | * (ECONNREFUSED), which may be a transient state. All | ||
715 | * others indicate a transport condition which has already | ||
716 | * undergone a best-effort. | ||
717 | */ | ||
718 | if (ep->rep_connected == -ECONNREFUSED && | ||
719 | ++retry_count <= RDMA_CONNECT_RETRY_MAX) { | ||
720 | dprintk("RPC: %s: non-peer_reject, retry\n", __func__); | ||
721 | goto retry; | ||
722 | } | ||
723 | if (ep->rep_connected <= 0) { | 723 | if (ep->rep_connected <= 0) { |
724 | /* Sometimes, the only way to reliably connect to remote | 724 | if (ep->rep_connected == -EAGAIN) |
725 | * CMs is to use same nonzero values for ORD and IRD. */ | ||
726 | if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 && | ||
727 | (ep->rep_remote_cma.responder_resources == 0 || | ||
728 | ep->rep_remote_cma.initiator_depth != | ||
729 | ep->rep_remote_cma.responder_resources)) { | ||
730 | if (ep->rep_remote_cma.responder_resources == 0) | ||
731 | ep->rep_remote_cma.responder_resources = 1; | ||
732 | ep->rep_remote_cma.initiator_depth = | ||
733 | ep->rep_remote_cma.responder_resources; | ||
734 | goto retry; | 725 | goto retry; |
735 | } | ||
736 | rc = ep->rep_connected; | 726 | rc = ep->rep_connected; |
737 | } else { | 727 | goto out; |
738 | struct rpcrdma_xprt *r_xprt; | ||
739 | unsigned int extras; | ||
740 | |||
741 | dprintk("RPC: %s: connected\n", __func__); | ||
742 | |||
743 | r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); | ||
744 | extras = r_xprt->rx_buf.rb_bc_srv_max_requests; | ||
745 | |||
746 | if (extras) { | ||
747 | rc = rpcrdma_ep_post_extra_recv(r_xprt, extras); | ||
748 | if (rc) { | ||
749 | pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n", | ||
750 | __func__, rc); | ||
751 | rc = 0; | ||
752 | } | ||
753 | } | ||
754 | } | 728 | } |
755 | 729 | ||
730 | dprintk("RPC: %s: connected\n", __func__); | ||
731 | extras = r_xprt->rx_buf.rb_bc_srv_max_requests; | ||
732 | if (extras) | ||
733 | rpcrdma_ep_post_extra_recv(r_xprt, extras); | ||
734 | |||
756 | out: | 735 | out: |
757 | if (rc) | 736 | if (rc) |
758 | ep->rep_connected = rc; | 737 | ep->rep_connected = rc; |
@@ -797,9 +776,7 @@ rpcrdma_mr_recovery_worker(struct work_struct *work) | |||
797 | 776 | ||
798 | spin_lock(&buf->rb_recovery_lock); | 777 | spin_lock(&buf->rb_recovery_lock); |
799 | while (!list_empty(&buf->rb_stale_mrs)) { | 778 | while (!list_empty(&buf->rb_stale_mrs)) { |
800 | mw = list_first_entry(&buf->rb_stale_mrs, | 779 | mw = rpcrdma_pop_mw(&buf->rb_stale_mrs); |
801 | struct rpcrdma_mw, mw_list); | ||
802 | list_del_init(&mw->mw_list); | ||
803 | spin_unlock(&buf->rb_recovery_lock); | 780 | spin_unlock(&buf->rb_recovery_lock); |
804 | 781 | ||
805 | dprintk("RPC: %s: recovering MR %p\n", __func__, mw); | 782 | dprintk("RPC: %s: recovering MR %p\n", __func__, mw); |
@@ -817,7 +794,7 @@ rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw) | |||
817 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | 794 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
818 | 795 | ||
819 | spin_lock(&buf->rb_recovery_lock); | 796 | spin_lock(&buf->rb_recovery_lock); |
820 | list_add(&mw->mw_list, &buf->rb_stale_mrs); | 797 | rpcrdma_push_mw(mw, &buf->rb_stale_mrs); |
821 | spin_unlock(&buf->rb_recovery_lock); | 798 | spin_unlock(&buf->rb_recovery_lock); |
822 | 799 | ||
823 | schedule_delayed_work(&buf->rb_recovery_worker, 0); | 800 | schedule_delayed_work(&buf->rb_recovery_worker, 0); |
@@ -1093,11 +1070,8 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt) | |||
1093 | struct rpcrdma_mw *mw = NULL; | 1070 | struct rpcrdma_mw *mw = NULL; |
1094 | 1071 | ||
1095 | spin_lock(&buf->rb_mwlock); | 1072 | spin_lock(&buf->rb_mwlock); |
1096 | if (!list_empty(&buf->rb_mws)) { | 1073 | if (!list_empty(&buf->rb_mws)) |
1097 | mw = list_first_entry(&buf->rb_mws, | 1074 | mw = rpcrdma_pop_mw(&buf->rb_mws); |
1098 | struct rpcrdma_mw, mw_list); | ||
1099 | list_del_init(&mw->mw_list); | ||
1100 | } | ||
1101 | spin_unlock(&buf->rb_mwlock); | 1075 | spin_unlock(&buf->rb_mwlock); |
1102 | 1076 | ||
1103 | if (!mw) | 1077 | if (!mw) |
@@ -1120,7 +1094,7 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw) | |||
1120 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | 1094 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
1121 | 1095 | ||
1122 | spin_lock(&buf->rb_mwlock); | 1096 | spin_lock(&buf->rb_mwlock); |
1123 | list_add_tail(&mw->mw_list, &buf->rb_mws); | 1097 | rpcrdma_push_mw(mw, &buf->rb_mws); |
1124 | spin_unlock(&buf->rb_mwlock); | 1098 | spin_unlock(&buf->rb_mwlock); |
1125 | } | 1099 | } |
1126 | 1100 | ||
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index e35efd4ac1e4..171a35116de9 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -74,7 +74,9 @@ struct rpcrdma_ia { | |||
74 | unsigned int ri_max_frmr_depth; | 74 | unsigned int ri_max_frmr_depth; |
75 | unsigned int ri_max_inline_write; | 75 | unsigned int ri_max_inline_write; |
76 | unsigned int ri_max_inline_read; | 76 | unsigned int ri_max_inline_read; |
77 | unsigned int ri_max_send_sges; | ||
77 | bool ri_reminv_expected; | 78 | bool ri_reminv_expected; |
79 | bool ri_implicit_roundup; | ||
78 | enum ib_mr_type ri_mrtype; | 80 | enum ib_mr_type ri_mrtype; |
79 | struct ib_qp_attr ri_qp_attr; | 81 | struct ib_qp_attr ri_qp_attr; |
80 | struct ib_qp_init_attr ri_qp_init_attr; | 82 | struct ib_qp_init_attr ri_qp_init_attr; |
@@ -303,15 +305,19 @@ struct rpcrdma_mr_seg { /* chunk descriptors */ | |||
303 | char *mr_offset; /* kva if no page, else offset */ | 305 | char *mr_offset; /* kva if no page, else offset */ |
304 | }; | 306 | }; |
305 | 307 | ||
306 | /* Reserve enough Send SGEs to send a maximum size inline request: | 308 | /* The Send SGE array is provisioned to send a maximum size |
309 | * inline request: | ||
307 | * - RPC-over-RDMA header | 310 | * - RPC-over-RDMA header |
308 | * - xdr_buf head iovec | 311 | * - xdr_buf head iovec |
309 | * - RPCRDMA_MAX_INLINE bytes, possibly unaligned, in pages | 312 | * - RPCRDMA_MAX_INLINE bytes, in pages |
310 | * - xdr_buf tail iovec | 313 | * - xdr_buf tail iovec |
314 | * | ||
315 | * The actual number of array elements consumed by each RPC | ||
316 | * depends on the device's max_sge limit. | ||
311 | */ | 317 | */ |
312 | enum { | 318 | enum { |
313 | RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1, | 319 | RPCRDMA_MIN_SEND_SGES = 3, |
314 | RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1, | 320 | RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT, |
315 | RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1, | 321 | RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1, |
316 | }; | 322 | }; |
317 | 323 | ||
@@ -348,6 +354,22 @@ rpcr_to_rdmar(struct rpc_rqst *rqst) | |||
348 | return rqst->rq_xprtdata; | 354 | return rqst->rq_xprtdata; |
349 | } | 355 | } |
350 | 356 | ||
357 | static inline void | ||
358 | rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list) | ||
359 | { | ||
360 | list_add_tail(&mw->mw_list, list); | ||
361 | } | ||
362 | |||
363 | static inline struct rpcrdma_mw * | ||
364 | rpcrdma_pop_mw(struct list_head *list) | ||
365 | { | ||
366 | struct rpcrdma_mw *mw; | ||
367 | |||
368 | mw = list_first_entry(list, struct rpcrdma_mw, mw_list); | ||
369 | list_del(&mw->mw_list); | ||
370 | return mw; | ||
371 | } | ||
372 | |||
351 | /* | 373 | /* |
352 | * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for | 374 | * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for |
353 | * inline requests/replies, and client/server credits. | 375 | * inline requests/replies, and client/server credits. |
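The new push/pop helpers replace the repeated list_first_entry()/list_del_init() sequences seen in fmr_ops.c, frwr_ops.c and verbs.c earlier in this diff. A hedged sketch of the consumer pattern they enable, assuming xprt_rdma.h is in scope; any locking the particular list requires is taken by the caller as before:

#include <linux/list.h>
#include "xprt_rdma.h"

/* Drain every registered MW from a list, handing each one to a handler. */
static void demo_drain_mws(struct list_head *mws,
			   void (*handler)(struct rpcrdma_mw *mw))
{
	struct rpcrdma_mw *mw;

	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);	/* unlinks and returns the head */
		handler(mw);
	}
}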
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 956c7bce80d1..16aff8ddc16f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -52,6 +52,8 @@ | |||
52 | #include "sunrpc.h" | 52 | #include "sunrpc.h" |
53 | 53 | ||
54 | static void xs_close(struct rpc_xprt *xprt); | 54 | static void xs_close(struct rpc_xprt *xprt); |
55 | static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, | ||
56 | struct socket *sock); | ||
55 | 57 | ||
56 | /* | 58 | /* |
57 | * xprtsock tunables | 59 | * xprtsock tunables |
@@ -666,6 +668,9 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
666 | if (task->tk_flags & RPC_TASK_SENT) | 668 | if (task->tk_flags & RPC_TASK_SENT) |
667 | zerocopy = false; | 669 | zerocopy = false; |
668 | 670 | ||
671 | if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) | ||
672 | xs_tcp_set_socket_timeouts(xprt, transport->sock); | ||
673 | |||
669 | /* Continue transmitting the packet/record. We must be careful | 674 | /* Continue transmitting the packet/record. We must be careful |
670 | * to cope with writespace callbacks arriving _after_ we have | 675 | * to cope with writespace callbacks arriving _after_ we have |
671 | * called sendmsg(). */ | 676 | * called sendmsg(). */ |
@@ -1734,7 +1739,9 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t | |||
1734 | */ | 1739 | */ |
1735 | static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) | 1740 | static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) |
1736 | { | 1741 | { |
1742 | spin_lock_bh(&xprt->transport_lock); | ||
1737 | xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); | 1743 | xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); |
1744 | spin_unlock_bh(&xprt->transport_lock); | ||
1738 | } | 1745 | } |
1739 | 1746 | ||
1740 | static unsigned short xs_get_random_port(void) | 1747 | static unsigned short xs_get_random_port(void) |
@@ -2235,6 +2242,66 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt) | |||
2235 | xs_reset_transport(transport); | 2242 | xs_reset_transport(transport); |
2236 | } | 2243 | } |
2237 | 2244 | ||
2245 | static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, | ||
2246 | struct socket *sock) | ||
2247 | { | ||
2248 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
2249 | unsigned int keepidle; | ||
2250 | unsigned int keepcnt; | ||
2251 | unsigned int opt_on = 1; | ||
2252 | unsigned int timeo; | ||
2253 | |||
2254 | spin_lock_bh(&xprt->transport_lock); | ||
2255 | keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); | ||
2256 | keepcnt = xprt->timeout->to_retries + 1; | ||
2257 | timeo = jiffies_to_msecs(xprt->timeout->to_initval) * | ||
2258 | (xprt->timeout->to_retries + 1); | ||
2259 | clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); | ||
2260 | spin_unlock_bh(&xprt->transport_lock); | ||
2261 | |||
2262 | /* TCP Keepalive options */ | ||
2263 | kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, | ||
2264 | (char *)&opt_on, sizeof(opt_on)); | ||
2265 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, | ||
2266 | (char *)&keepidle, sizeof(keepidle)); | ||
2267 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, | ||
2268 | (char *)&keepidle, sizeof(keepidle)); | ||
2269 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, | ||
2270 | (char *)&keepcnt, sizeof(keepcnt)); | ||
2271 | |||
2272 | /* TCP user timeout (see RFC5482) */ | ||
2273 | kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, | ||
2274 | (char *)&timeo, sizeof(timeo)); | ||
2275 | } | ||
2276 | |||
2277 | static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, | ||
2278 | unsigned long connect_timeout, | ||
2279 | unsigned long reconnect_timeout) | ||
2280 | { | ||
2281 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
2282 | struct rpc_timeout to; | ||
2283 | unsigned long initval; | ||
2284 | |||
2285 | spin_lock_bh(&xprt->transport_lock); | ||
2286 | if (reconnect_timeout < xprt->max_reconnect_timeout) | ||
2287 | xprt->max_reconnect_timeout = reconnect_timeout; | ||
2288 | if (connect_timeout < xprt->connect_timeout) { | ||
2289 | memcpy(&to, xprt->timeout, sizeof(to)); | ||
2290 | initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1); | ||
2291 | /* Arbitrary lower limit */ | ||
2292 | if (initval < XS_TCP_INIT_REEST_TO << 1) | ||
2293 | initval = XS_TCP_INIT_REEST_TO << 1; | ||
2294 | to.to_initval = initval; | ||
2295 | to.to_maxval = initval; | ||
2296 | memcpy(&transport->tcp_timeout, &to, | ||
2297 | sizeof(transport->tcp_timeout)); | ||
2298 | xprt->timeout = &transport->tcp_timeout; | ||
2299 | xprt->connect_timeout = connect_timeout; | ||
2300 | } | ||
2301 | set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); | ||
2302 | spin_unlock_bh(&xprt->transport_lock); | ||
2303 | } | ||
2304 | |||
2238 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | 2305 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) |
2239 | { | 2306 | { |
2240 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 2307 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
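A worked example may help with the arithmetic in the two functions added above. Assuming HZ = 1000 and a timeout of to_initval = 60 * HZ with to_retries = 2 (stated here as assumptions; they match the transport's usual TCP defaults), xs_tcp_set_socket_timeouts() programs a 60 s keepalive idle/interval, 3 keepalive probes, and a TCP_USER_TIMEOUT of 180000 ms, while xs_tcp_set_connect_timeout() divides a requested connect timeout evenly over to_retries + 1 attempts with a lower limit of 2 * XS_TCP_INIT_REEST_TO (this file defines XS_TCP_INIT_REEST_TO as 3 * HZ). A self-contained user-space sketch of just that arithmetic:

#include <stdio.h>

#define HZ                      1000UL          /* assumed for this example */
#define XS_TCP_INIT_REEST_TO    (3UL * HZ)      /* 3 s, per xprtsock.c */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long to_initval = 60 * HZ;     /* assumed default */
        unsigned long to_retries = 2;           /* assumed default */

        /* xs_tcp_set_socket_timeouts() */
        unsigned long keepidle = DIV_ROUND_UP(to_initval, HZ);                /* 60 s */
        unsigned long keepcnt  = to_retries + 1;                              /* 3 probes */
        unsigned long timeo    = (to_initval * 1000 / HZ) * (to_retries + 1); /* 180000 ms */

        /* xs_tcp_set_connect_timeout() for a requested 90 s connect timeout */
        unsigned long connect_timeout = 90 * HZ;
        unsigned long initval = DIV_ROUND_UP(connect_timeout, to_retries + 1);
        if (initval < XS_TCP_INIT_REEST_TO << 1)
                initval = XS_TCP_INIT_REEST_TO << 1;    /* 6 s lower limit */

        printf("keepidle=%lus keepcnt=%lu user_timeout=%lums per-attempt=%lus\n",
               keepidle, keepcnt, timeo, initval / HZ);
        return 0;
}

With these inputs the per-attempt connect interval comes out to 30 s, well above the 6 s floor, so to_initval and to_maxval are both set to 30 * HZ.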
@@ -2242,22 +2309,8 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2242 | 2309 | ||
2243 | if (!transport->inet) { | 2310 | if (!transport->inet) { |
2244 | struct sock *sk = sock->sk; | 2311 | struct sock *sk = sock->sk; |
2245 | unsigned int keepidle = xprt->timeout->to_initval / HZ; | ||
2246 | unsigned int keepcnt = xprt->timeout->to_retries + 1; | ||
2247 | unsigned int opt_on = 1; | ||
2248 | unsigned int timeo; | ||
2249 | unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC; | 2312 | unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC; |
2250 | 2313 | ||
2251 | /* TCP Keepalive options */ | ||
2252 | kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, | ||
2253 | (char *)&opt_on, sizeof(opt_on)); | ||
2254 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, | ||
2255 | (char *)&keepidle, sizeof(keepidle)); | ||
2256 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, | ||
2257 | (char *)&keepidle, sizeof(keepidle)); | ||
2258 | kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, | ||
2259 | (char *)&keepcnt, sizeof(keepcnt)); | ||
2260 | |||
2261 | /* Avoid temporary address, they are bad for long-lived | 2314 | /* Avoid temporary address, they are bad for long-lived |
2262 | * connections such as NFS mounts. | 2315 | * connections such as NFS mounts. |
2263 | * RFC4941, section 3.6 suggests that: | 2316 | * RFC4941, section 3.6 suggests that: |
@@ -2268,11 +2321,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2268 | kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES, | 2321 | kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES, |
2269 | (char *)&addr_pref, sizeof(addr_pref)); | 2322 | (char *)&addr_pref, sizeof(addr_pref)); |
2270 | 2323 | ||
2271 | /* TCP user timeout (see RFC5482) */ | 2324 | xs_tcp_set_socket_timeouts(xprt, sock); |
2272 | timeo = jiffies_to_msecs(xprt->timeout->to_initval) * | ||
2273 | (xprt->timeout->to_retries + 1); | ||
2274 | kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, | ||
2275 | (char *)&timeo, sizeof(timeo)); | ||
2276 | 2325 | ||
2277 | write_lock_bh(&sk->sk_callback_lock); | 2326 | write_lock_bh(&sk->sk_callback_lock); |
2278 | 2327 | ||
@@ -2721,6 +2770,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2721 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2770 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
2722 | .close = xs_tcp_shutdown, | 2771 | .close = xs_tcp_shutdown, |
2723 | .destroy = xs_destroy, | 2772 | .destroy = xs_destroy, |
2773 | .set_connect_timeout = xs_tcp_set_connect_timeout, | ||
2724 | .print_stats = xs_tcp_print_stats, | 2774 | .print_stats = xs_tcp_print_stats, |
2725 | .enable_swap = xs_enable_swap, | 2775 | .enable_swap = xs_enable_swap, |
2726 | .disable_swap = xs_disable_swap, | 2776 | .disable_swap = xs_disable_swap, |
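With .set_connect_timeout wired into xs_tcp_ops above, an upper layer holding an rpc_xprt can ask the TCP transport to adjust its connect and reconnect timeouts through the ops table. A hedged sketch of a hypothetical caller; the helper name is invented here, and the op's signature is inferred from xs_tcp_set_connect_timeout() earlier in this patch:

#include <linux/sunrpc/xprt.h>

/* Illustrative only: request a 15 s connect timeout and cap the reconnect
 * back-off at 60 s, if the transport implements the new op.
 */
static void example_tune_transport(struct rpc_xprt *xprt)
{
        if (xprt->ops->set_connect_timeout)
                xprt->ops->set_connect_timeout(xprt, 15 * HZ, 60 * HZ);
}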
@@ -3007,6 +3057,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
3007 | xprt->timeout = &xs_tcp_default_timeout; | 3057 | xprt->timeout = &xs_tcp_default_timeout; |
3008 | 3058 | ||
3009 | xprt->max_reconnect_timeout = xprt->timeout->to_maxval; | 3059 | xprt->max_reconnect_timeout = xprt->timeout->to_maxval; |
3060 | xprt->connect_timeout = xprt->timeout->to_initval * | ||
3061 | (xprt->timeout->to_retries + 1); | ||
3010 | 3062 | ||
3011 | INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); | 3063 | INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); |
3012 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); | 3064 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); |
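For reference, the default connect_timeout initialized above follows the same "per-attempt value times number of attempts" shape: assuming the standard TCP transport timeout of to_initval = 60 * HZ and to_retries = 2 (an assumption, not visible in this hunk), it works out to 60 * HZ * (2 + 1) = 180 * HZ jiffies, i.e. 180 seconds.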
@@ -3209,7 +3261,9 @@ static int param_set_uint_minmax(const char *val, | |||
3209 | if (!val) | 3261 | if (!val) |
3210 | return -EINVAL; | 3262 | return -EINVAL; |
3211 | ret = kstrtouint(val, 0, &num); | 3263 | ret = kstrtouint(val, 0, &num); |
3212 | if (ret == -EINVAL || num < min || num > max) | 3264 | if (ret) |
3265 | return ret; | ||
3266 | if (num < min || num > max) | ||
3213 | return -EINVAL; | 3267 | return -EINVAL; |
3214 | *((unsigned int *)kp->arg) = num; | 3268 | *((unsigned int *)kp->arg) = num; |
3215 | return 0; | 3269 | return 0; |
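The final hunk tightens error handling in param_set_uint_minmax(): any non-zero kstrtouint() result is now returned as-is, before num is examined. Previously only -EINVAL was caught, so a result such as -ERANGE (overflow) fell through to a range check on a num that kstrtouint() had never written. A small user-space analogue of the corrected "check the parse result first, then the range" flow, using strtoul(); names and limits are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: parse an unsigned integer and enforce [min, max],
 * propagating parse failures before the value is ever used.
 */
static int parse_uint_minmax(const char *val, unsigned int *out,
                             unsigned int min, unsigned int max)
{
        char *end;
        unsigned long num;

        if (!val)
                return -EINVAL;
        errno = 0;
        num = strtoul(val, &end, 0);
        if (errno || end == val || *end != '\0')
                return errno ? -errno : -EINVAL;   /* parse error: stop here */
        if (num < min || num > max)
                return -EINVAL;                    /* only now check the range */
        *out = (unsigned int)num;
        return 0;
}

int main(void)
{
        unsigned int port;

        printf("%d\n", parse_uint_minmax("2049", &port, 0, 65535));   /* 0 (accepted) */
        printf("%d\n", parse_uint_minmax("70000", &port, 0, 65535));  /* -EINVAL (above max) */
        return 0;
}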