author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-03 13:59:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-03 13:59:39 -0400
commit     1db772216f48978d5146b858586f6178433aad38 (patch)
tree       4cb1f7345256c7a89c85b7a6157bbf16b944782e /fs/nfsd
parent     86652188f345edec56b0074a65f6db17f16eb359 (diff)
parent     676e4ebd5f2c3b4fd1d2bff79b68385c23c5c105 (diff)
Merge branch 'for-3.10' of git://linux-nfs.org/~bfields/linux
Pull nfsd changes from J Bruce Fields:
 "Highlights include:

   - Some more DRC cleanup and performance work from Jeff Layton

   - A gss-proxy upcall from Simo Sorce: currently krb5 mounts to the
     server using credentials from Active Directory often fail due to
     limitations of the svcgssd upcall interface.  This replacement
     lifts those limitations.  The existing upcall is still supported
     for backwards compatibility.

   - More NFSv4.1 support: at this point, a user with a current client
     who upgrades from 4.0 to 4.1 should see no regressions.  In theory
     we do everything a 4.1 server is required to do.  Patches for a
     couple minor exceptions are ready for 3.11, and with those and some
     more testing I'd like to turn 4.1 on by default in 3.11."

Fix up semantic conflict as per Stephen Rothwell and linux-next:

 Commit 030d794bf498 ("SUNRPC: Use gssproxy upcall for server RPCGSS
 authentication") adds two new users of "PDE(inode)->data", but we're
 supposed to use "PDE_DATA(inode)" instead since commit d9dda78bad87
 ("procfs: new helper - PDE_DATA(inode)").

 The old PDE() macro is no longer available since commit c30480b92cf4
 ("proc: Make the PROC_I() and PDE() macros internal to procfs").

* 'for-3.10' of git://linux-nfs.org/~bfields/linux: (60 commits)
  NFSD: SECINFO doesn't handle unsupported pseudoflavors correctly
  NFSD: Simplify GSS flavor encoding in nfsd4_do_encode_secinfo()
  nfsd: make symbol nfsd_reply_cache_shrinker static
  svcauth_gss: fix error return code in rsc_parse()
  nfsd4: don't remap EISDIR errors in rename
  svcrpc: fix gss-proxy to respect user namespaces
  SUNRPC: gssp_procedures[] can be static
  SUNRPC: define {create,destroy}_use_gss_proxy_proc_entry in !PROC case
  nfsd4: better error return to indicate SSV non-support
  nfsd: fix EXDEV checking in rename
  SUNRPC: Use gssproxy upcall for server RPCGSS authentication.
  SUNRPC: Add RPC based upcall mechanism for RPCGSS auth
  SUNRPC: conditionally return endtime from import_sec_context
  SUNRPC: allow disabling idle timeout
  SUNRPC: attempt AF_LOCAL connect on setup
  nfsd: Decode and send 64bit time values
  nfsd4: put_client_renew_locked can be static
  nfsd4: remove unused macro
  nfsd4: remove some useless code
  nfsd4: implement SEQ4_STATUS_RECALLABLE_STATE_REVOKED
  ...
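The PDE()->data to PDE_DATA() fixup described above follows the standard
conversion pattern for procfs users of that era.  A minimal, hypothetical
sketch of the pattern is below; the example_* names and the example_state
structure are illustrative only and are not taken from the gss-proxy
patches:

    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* Hypothetical per-entry state, passed as ->data via proc_create_data(). */
    struct example_state {
            int value;
    };

    static int example_proc_show(struct seq_file *m, void *v)
    {
            struct example_state *st = m->private;

            seq_printf(m, "%d\n", st->value);
            return 0;
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            /*
             * Before the procfs rework this would have been written as
             *         single_open(file, example_proc_show, PDE(inode)->data);
             * but PDE() is now internal to procfs, so the fixup switches
             * to the PDE_DATA() accessor added by commit d9dda78bad87.
             */
            return single_open(file, example_proc_show, PDE_DATA(inode));
    }

    static const struct file_operations example_proc_fops = {
            .open           = example_proc_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = single_release,
    };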
Diffstat (limited to 'fs/nfsd')
-rw-r--r--  fs/nfsd/cache.h        |   1
-rw-r--r--  fs/nfsd/netns.h        |   1
-rw-r--r--  fs/nfsd/nfs4callback.c |  33
-rw-r--r--  fs/nfsd/nfs4proc.c     |  97
-rw-r--r--  fs/nfsd/nfs4state.c    | 747
-rw-r--r--  fs/nfsd/nfs4xdr.c      | 109
-rw-r--r--  fs/nfsd/nfscache.c     | 197
-rw-r--r--  fs/nfsd/nfsctl.c       |  13
-rw-r--r--  fs/nfsd/state.h        |  27
-rw-r--r--  fs/nfsd/vfs.c          |   6
-rw-r--r--  fs/nfsd/xdr4.h         |   3
-rw-r--r--  fs/nfsd/xdr4cb.h       |  23
12 files changed, 708 insertions, 549 deletions
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 87fd1410b737..d5c5b3e00266 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -82,6 +82,7 @@ int nfsd_reply_cache_init(void);
82void nfsd_reply_cache_shutdown(void); 82void nfsd_reply_cache_shutdown(void);
83int nfsd_cache_lookup(struct svc_rqst *); 83int nfsd_cache_lookup(struct svc_rqst *);
84void nfsd_cache_update(struct svc_rqst *, int, __be32 *); 84void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
85int nfsd_reply_cache_stats_open(struct inode *, struct file *);
85 86
86#ifdef CONFIG_NFSD_V4 87#ifdef CONFIG_NFSD_V4
87void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); 88void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 1051bebff1b0..849a7c3ced22 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -80,6 +80,7 @@ struct nfsd_net {
80 */ 80 */
81 struct list_head client_lru; 81 struct list_head client_lru;
82 struct list_head close_lru; 82 struct list_head close_lru;
83 struct list_head del_recall_lru;
83 84
84 struct delayed_work laundromat_work; 85 struct delayed_work laundromat_work;
85 86
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 99bc85ff0217..7f05cd140de3 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -37,6 +37,7 @@
37#include "nfsd.h" 37#include "nfsd.h"
38#include "state.h" 38#include "state.h"
39#include "netns.h" 39#include "netns.h"
40#include "xdr4cb.h"
40 41
41#define NFSDDBG_FACILITY NFSDDBG_PROC 42#define NFSDDBG_FACILITY NFSDDBG_PROC
42 43
@@ -53,30 +54,6 @@ enum {
53 NFSPROC4_CLNT_CB_SEQUENCE, 54 NFSPROC4_CLNT_CB_SEQUENCE,
54}; 55};
55 56
56#define NFS4_MAXTAGLEN 20
57
58#define NFS4_enc_cb_null_sz 0
59#define NFS4_dec_cb_null_sz 0
60#define cb_compound_enc_hdr_sz 4
61#define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2))
62#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2)
63#define cb_sequence_enc_sz (sessionid_sz + 4 + \
64 1 /* no referring calls list yet */)
65#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4)
66
67#define op_enc_sz 1
68#define op_dec_sz 2
69#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
70#define enc_stateid_sz (NFS4_STATEID_SIZE >> 2)
71#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
72 cb_sequence_enc_sz + \
73 1 + enc_stateid_sz + \
74 enc_nfs4_fh_sz)
75
76#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
77 cb_sequence_dec_sz + \
78 op_dec_sz)
79
80struct nfs4_cb_compound_hdr { 57struct nfs4_cb_compound_hdr {
81 /* args */ 58 /* args */
82 u32 ident; /* minorversion 0 only */ 59 u32 ident; /* minorversion 0 only */
@@ -817,8 +794,7 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
817static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) 794static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
818{ 795{
819 struct nfsd4_callback *cb = calldata; 796 struct nfsd4_callback *cb = calldata;
820 struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); 797 struct nfs4_client *clp = cb->cb_clp;
821 struct nfs4_client *clp = dp->dl_stid.sc_client;
822 u32 minorversion = clp->cl_minorversion; 798 u32 minorversion = clp->cl_minorversion;
823 799
824 cb->cb_minorversion = minorversion; 800 cb->cb_minorversion = minorversion;
@@ -839,8 +815,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
839static void nfsd4_cb_done(struct rpc_task *task, void *calldata) 815static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
840{ 816{
841 struct nfsd4_callback *cb = calldata; 817 struct nfsd4_callback *cb = calldata;
842 struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); 818 struct nfs4_client *clp = cb->cb_clp;
843 struct nfs4_client *clp = dp->dl_stid.sc_client;
844 819
845 dprintk("%s: minorversion=%d\n", __func__, 820 dprintk("%s: minorversion=%d\n", __func__,
846 clp->cl_minorversion); 821 clp->cl_minorversion);
@@ -863,7 +838,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
863{ 838{
864 struct nfsd4_callback *cb = calldata; 839 struct nfsd4_callback *cb = calldata;
865 struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); 840 struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
866 struct nfs4_client *clp = dp->dl_stid.sc_client; 841 struct nfs4_client *clp = cb->cb_clp;
867 struct rpc_clnt *current_rpc_client = clp->cl_cb_client; 842 struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
868 843
869 nfsd4_cb_done(task, calldata); 844 nfsd4_cb_done(task, calldata);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index ae73175e6e68..8ae5abfe6ba2 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -191,9 +191,18 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
191 return nfserr_symlink; 191 return nfserr_symlink;
192} 192}
193 193
194static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
195{
196 if (nfsd4_has_session(cstate))
197 return;
198 fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
199 &resfh->fh_handle);
200}
201
194static __be32 202static __be32
195do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 203do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
196{ 204{
205 struct svc_fh *current_fh = &cstate->current_fh;
197 struct svc_fh *resfh; 206 struct svc_fh *resfh;
198 int accmode; 207 int accmode;
199 __be32 status; 208 __be32 status;
@@ -252,9 +261,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
252 if (is_create_with_attrs(open) && open->op_acl != NULL) 261 if (is_create_with_attrs(open) && open->op_acl != NULL)
253 do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval); 262 do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
254 263
255 /* set reply cache */ 264 nfsd4_set_open_owner_reply_cache(cstate, open, resfh);
256 fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
257 &resfh->fh_handle);
258 accmode = NFSD_MAY_NOP; 265 accmode = NFSD_MAY_NOP;
259 if (open->op_created) 266 if (open->op_created)
260 accmode |= NFSD_MAY_OWNER_OVERRIDE; 267 accmode |= NFSD_MAY_OWNER_OVERRIDE;
@@ -268,8 +275,9 @@ out:
268} 275}
269 276
270static __be32 277static __be32
271do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 278do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
272{ 279{
280 struct svc_fh *current_fh = &cstate->current_fh;
273 __be32 status; 281 __be32 status;
274 282
275 /* We don't know the target directory, and therefore can not 283 /* We don't know the target directory, and therefore can not
@@ -278,9 +286,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
278 286
279 memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info)); 287 memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
280 288
281 /* set replay cache */ 289 nfsd4_set_open_owner_reply_cache(cstate, open, current_fh);
282 fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
283 &current_fh->fh_handle);
284 290
285 open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) && 291 open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
286 (open->op_iattr.ia_size == 0); 292 (open->op_iattr.ia_size == 0);
@@ -351,6 +357,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
351 } 357 }
352 if (status) 358 if (status)
353 goto out; 359 goto out;
360 if (open->op_xdr_error) {
361 status = open->op_xdr_error;
362 goto out;
363 }
354 364
355 status = nfsd4_check_open_attributes(rqstp, cstate, open); 365 status = nfsd4_check_open_attributes(rqstp, cstate, open);
356 if (status) 366 if (status)
@@ -368,8 +378,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
368 switch (open->op_claim_type) { 378 switch (open->op_claim_type) {
369 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 379 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
370 case NFS4_OPEN_CLAIM_NULL: 380 case NFS4_OPEN_CLAIM_NULL:
371 status = do_open_lookup(rqstp, &cstate->current_fh, 381 status = do_open_lookup(rqstp, cstate, open);
372 open);
373 if (status) 382 if (status)
374 goto out; 383 goto out;
375 break; 384 break;
@@ -382,8 +391,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
382 goto out; 391 goto out;
383 case NFS4_OPEN_CLAIM_FH: 392 case NFS4_OPEN_CLAIM_FH:
384 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 393 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
385 status = do_open_fhandle(rqstp, &cstate->current_fh, 394 status = do_open_fhandle(rqstp, cstate, open);
386 open);
387 if (status) 395 if (status)
388 goto out; 396 goto out;
389 break; 397 break;
@@ -409,14 +417,33 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
409 WARN_ON(status && open->op_created); 417 WARN_ON(status && open->op_created);
410out: 418out:
411 nfsd4_cleanup_open_state(open, status); 419 nfsd4_cleanup_open_state(open, status);
412 if (open->op_openowner) 420 if (open->op_openowner && !nfsd4_has_session(cstate))
413 cstate->replay_owner = &open->op_openowner->oo_owner; 421 cstate->replay_owner = &open->op_openowner->oo_owner;
414 else 422 nfsd4_bump_seqid(cstate, status);
423 if (!cstate->replay_owner)
415 nfs4_unlock_state(); 424 nfs4_unlock_state();
416 return status; 425 return status;
417} 426}
418 427
419/* 428/*
429 * OPEN is the only seqid-mutating operation whose decoding can fail
430 * with a seqid-mutating error (specifically, decoding of user names in
431 * the attributes). Therefore we have to do some processing to look up
432 * the stateowner so that we can bump the seqid.
433 */
434static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_op *op)
435{
436 struct nfsd4_open *open = (struct nfsd4_open *)&op->u;
437
438 if (!seqid_mutating_err(ntohl(op->status)))
439 return op->status;
440 if (nfsd4_has_session(cstate))
441 return op->status;
442 open->op_xdr_error = op->status;
443 return nfsd4_open(rqstp, cstate, open);
444}
445
446/*
420 * filehandle-manipulating ops. 447 * filehandle-manipulating ops.
421 */ 448 */
422static __be32 449static __be32
@@ -786,21 +813,11 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
786 status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname, 813 status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
787 rename->rn_snamelen, &cstate->current_fh, 814 rename->rn_snamelen, &cstate->current_fh,
788 rename->rn_tname, rename->rn_tnamelen); 815 rename->rn_tname, rename->rn_tnamelen);
789 816 if (status)
790 /* the underlying filesystem returns different error's than required 817 return status;
791 * by NFSv4. both save_fh and current_fh have been verified.. */ 818 set_change_info(&rename->rn_sinfo, &cstate->current_fh);
792 if (status == nfserr_isdir) 819 set_change_info(&rename->rn_tinfo, &cstate->save_fh);
793 status = nfserr_exist; 820 return nfs_ok;
794 else if ((status == nfserr_notdir) &&
795 (S_ISDIR(cstate->save_fh.fh_dentry->d_inode->i_mode) &&
796 S_ISDIR(cstate->current_fh.fh_dentry->d_inode->i_mode)))
797 status = nfserr_exist;
798
799 if (!status) {
800 set_change_info(&rename->rn_sinfo, &cstate->current_fh);
801 set_change_info(&rename->rn_tinfo, &cstate->save_fh);
802 }
803 return status;
804} 821}
805 822
806static __be32 823static __be32
@@ -931,14 +948,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
931 nfs4_lock_state(); 948 nfs4_lock_state();
932 status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), 949 status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
933 cstate, stateid, WR_STATE, &filp); 950 cstate, stateid, WR_STATE, &filp);
934 if (filp)
935 get_file(filp);
936 nfs4_unlock_state();
937
938 if (status) { 951 if (status) {
952 nfs4_unlock_state();
939 dprintk("NFSD: nfsd4_write: couldn't process stateid!\n"); 953 dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
940 return status; 954 return status;
941 } 955 }
956 if (filp)
957 get_file(filp);
958 nfs4_unlock_state();
942 959
943 cnt = write->wr_buflen; 960 cnt = write->wr_buflen;
944 write->wr_how_written = write->wr_stable_how; 961 write->wr_how_written = write->wr_stable_how;
@@ -1244,8 +1261,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
1244 * for example, if there is a miscellaneous XDR error 1261 * for example, if there is a miscellaneous XDR error
1245 * it will be set to nfserr_bad_xdr. 1262 * it will be set to nfserr_bad_xdr.
1246 */ 1263 */
1247 if (op->status) 1264 if (op->status) {
1265 if (op->opnum == OP_OPEN)
1266 op->status = nfsd4_open_omfg(rqstp, cstate, op);
1248 goto encode_op; 1267 goto encode_op;
1268 }
1249 1269
1250 /* We must be able to encode a successful response to 1270 /* We must be able to encode a successful response to
1251 * this operation, with enough room left over to encode a 1271 * this operation, with enough room left over to encode a
@@ -1282,12 +1302,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
1282 if (op->status) 1302 if (op->status)
1283 goto encode_op; 1303 goto encode_op;
1284 1304
1285 if (opdesc->op_func) { 1305 if (opdesc->op_get_currentstateid)
1286 if (opdesc->op_get_currentstateid) 1306 opdesc->op_get_currentstateid(cstate, &op->u);
1287 opdesc->op_get_currentstateid(cstate, &op->u); 1307 op->status = opdesc->op_func(rqstp, cstate, &op->u);
1288 op->status = opdesc->op_func(rqstp, cstate, &op->u);
1289 } else
1290 BUG_ON(op->status == nfs_ok);
1291 1308
1292 if (!op->status) { 1309 if (!op->status) {
1293 if (opdesc->op_set_currentstateid) 1310 if (opdesc->op_set_currentstateid)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 417c84877742..316ec843dec2 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -42,6 +42,7 @@
42#include <linux/sunrpc/svcauth_gss.h> 42#include <linux/sunrpc/svcauth_gss.h>
43#include <linux/sunrpc/addr.h> 43#include <linux/sunrpc/addr.h>
44#include "xdr4.h" 44#include "xdr4.h"
45#include "xdr4cb.h"
45#include "vfs.h" 46#include "vfs.h"
46#include "current_stateid.h" 47#include "current_stateid.h"
47 48
@@ -94,17 +95,32 @@ nfs4_lock_state(void)
94 mutex_lock(&client_mutex); 95 mutex_lock(&client_mutex);
95} 96}
96 97
97static void free_session(struct kref *); 98static void free_session(struct nfsd4_session *);
98 99
99/* Must be called under the client_lock */ 100void nfsd4_put_session(struct nfsd4_session *ses)
100static void nfsd4_put_session_locked(struct nfsd4_session *ses) 101{
102 atomic_dec(&ses->se_ref);
103}
104
105static bool is_session_dead(struct nfsd4_session *ses)
106{
107 return ses->se_flags & NFS4_SESSION_DEAD;
108}
109
110static __be32 mark_session_dead_locked(struct nfsd4_session *ses)
101{ 111{
102 kref_put(&ses->se_ref, free_session); 112 if (atomic_read(&ses->se_ref))
113 return nfserr_jukebox;
114 ses->se_flags |= NFS4_SESSION_DEAD;
115 return nfs_ok;
103} 116}
104 117
105static void nfsd4_get_session(struct nfsd4_session *ses) 118static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
106{ 119{
107 kref_get(&ses->se_ref); 120 if (is_session_dead(ses))
121 return nfserr_badsession;
122 atomic_inc(&ses->se_ref);
123 return nfs_ok;
108} 124}
109 125
110void 126void
@@ -113,6 +129,90 @@ nfs4_unlock_state(void)
113 mutex_unlock(&client_mutex); 129 mutex_unlock(&client_mutex);
114} 130}
115 131
132static bool is_client_expired(struct nfs4_client *clp)
133{
134 return clp->cl_time == 0;
135}
136
137static __be32 mark_client_expired_locked(struct nfs4_client *clp)
138{
139 if (atomic_read(&clp->cl_refcount))
140 return nfserr_jukebox;
141 clp->cl_time = 0;
142 return nfs_ok;
143}
144
145static __be32 mark_client_expired(struct nfs4_client *clp)
146{
147 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
148 __be32 ret;
149
150 spin_lock(&nn->client_lock);
151 ret = mark_client_expired_locked(clp);
152 spin_unlock(&nn->client_lock);
153 return ret;
154}
155
156static __be32 get_client_locked(struct nfs4_client *clp)
157{
158 if (is_client_expired(clp))
159 return nfserr_expired;
160 atomic_inc(&clp->cl_refcount);
161 return nfs_ok;
162}
163
164/* must be called under the client_lock */
165static inline void
166renew_client_locked(struct nfs4_client *clp)
167{
168 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
169
170 if (is_client_expired(clp)) {
171 WARN_ON(1);
172 printk("%s: client (clientid %08x/%08x) already expired\n",
173 __func__,
174 clp->cl_clientid.cl_boot,
175 clp->cl_clientid.cl_id);
176 return;
177 }
178
179 dprintk("renewing client (clientid %08x/%08x)\n",
180 clp->cl_clientid.cl_boot,
181 clp->cl_clientid.cl_id);
182 list_move_tail(&clp->cl_lru, &nn->client_lru);
183 clp->cl_time = get_seconds();
184}
185
186static inline void
187renew_client(struct nfs4_client *clp)
188{
189 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
190
191 spin_lock(&nn->client_lock);
192 renew_client_locked(clp);
193 spin_unlock(&nn->client_lock);
194}
195
196static void put_client_renew_locked(struct nfs4_client *clp)
197{
198 if (!atomic_dec_and_test(&clp->cl_refcount))
199 return;
200 if (!is_client_expired(clp))
201 renew_client_locked(clp);
202}
203
204void put_client_renew(struct nfs4_client *clp)
205{
206 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
207
208 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
209 return;
210 if (!is_client_expired(clp))
211 renew_client_locked(clp);
212 spin_unlock(&nn->client_lock);
213}
214
215
116static inline u32 216static inline u32
117opaque_hashval(const void *ptr, int nbytes) 217opaque_hashval(const void *ptr, int nbytes)
118{ 218{
@@ -126,8 +226,6 @@ opaque_hashval(const void *ptr, int nbytes)
126 return x; 226 return x;
127} 227}
128 228
129static struct list_head del_recall_lru;
130
131static void nfsd4_free_file(struct nfs4_file *f) 229static void nfsd4_free_file(struct nfs4_file *f)
132{ 230{
133 kmem_cache_free(file_slab, f); 231 kmem_cache_free(file_slab, f);
@@ -137,7 +235,7 @@ static inline void
137put_nfs4_file(struct nfs4_file *fi) 235put_nfs4_file(struct nfs4_file *fi)
138{ 236{
139 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) { 237 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
140 list_del(&fi->fi_hash); 238 hlist_del(&fi->fi_hash);
141 spin_unlock(&recall_lock); 239 spin_unlock(&recall_lock);
142 iput(fi->fi_inode); 240 iput(fi->fi_inode);
143 nfsd4_free_file(fi); 241 nfsd4_free_file(fi);
@@ -181,7 +279,7 @@ static unsigned int file_hashval(struct inode *ino)
181 return hash_ptr(ino, FILE_HASH_BITS); 279 return hash_ptr(ino, FILE_HASH_BITS);
182} 280}
183 281
184static struct list_head file_hashtbl[FILE_HASH_SIZE]; 282static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
185 283
186static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) 284static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
187{ 285{
@@ -210,13 +308,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
210{ 308{
211 if (atomic_dec_and_test(&fp->fi_access[oflag])) { 309 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
212 nfs4_file_put_fd(fp, oflag); 310 nfs4_file_put_fd(fp, oflag);
213 /* 311 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
214 * It's also safe to get rid of the RDWR open *if*
215 * we no longer have need of the other kind of access
216 * or if we already have the other kind of open:
217 */
218 if (fp->fi_fds[1-oflag]
219 || atomic_read(&fp->fi_access[1 - oflag]) == 0)
220 nfs4_file_put_fd(fp, O_RDWR); 312 nfs4_file_put_fd(fp, O_RDWR);
221 } 313 }
222} 314}
@@ -262,7 +354,7 @@ kmem_cache *slab)
262 */ 354 */
263 return stid; 355 return stid;
264out_free: 356out_free:
265 kfree(stid); 357 kmem_cache_free(slab, stid);
266 return NULL; 358 return NULL;
267} 359}
268 360
@@ -313,21 +405,18 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
313 return dp; 405 return dp;
314} 406}
315 407
316static void free_stid(struct nfs4_stid *s, struct kmem_cache *slab) 408static void remove_stid(struct nfs4_stid *s)
317{ 409{
318 struct idr *stateids = &s->sc_client->cl_stateids; 410 struct idr *stateids = &s->sc_client->cl_stateids;
319 411
320 idr_remove(stateids, s->sc_stateid.si_opaque.so_id); 412 idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
321 kmem_cache_free(slab, s);
322} 413}
323 414
324void 415void
325nfs4_put_delegation(struct nfs4_delegation *dp) 416nfs4_put_delegation(struct nfs4_delegation *dp)
326{ 417{
327 if (atomic_dec_and_test(&dp->dl_count)) { 418 if (atomic_dec_and_test(&dp->dl_count)) {
328 dprintk("NFSD: freeing dp %p\n",dp); 419 kmem_cache_free(deleg_slab, dp);
329 put_nfs4_file(dp->dl_file);
330 free_stid(&dp->dl_stid, deleg_slab);
331 num_delegations--; 420 num_delegations--;
332 } 421 }
333} 422}
@@ -351,16 +440,45 @@ static void unhash_stid(struct nfs4_stid *s)
351static void 440static void
352unhash_delegation(struct nfs4_delegation *dp) 441unhash_delegation(struct nfs4_delegation *dp)
353{ 442{
354 unhash_stid(&dp->dl_stid);
355 list_del_init(&dp->dl_perclnt); 443 list_del_init(&dp->dl_perclnt);
356 spin_lock(&recall_lock); 444 spin_lock(&recall_lock);
357 list_del_init(&dp->dl_perfile); 445 list_del_init(&dp->dl_perfile);
358 list_del_init(&dp->dl_recall_lru); 446 list_del_init(&dp->dl_recall_lru);
359 spin_unlock(&recall_lock); 447 spin_unlock(&recall_lock);
360 nfs4_put_deleg_lease(dp->dl_file); 448 nfs4_put_deleg_lease(dp->dl_file);
449 put_nfs4_file(dp->dl_file);
450 dp->dl_file = NULL;
451}
452
453
454
455static void destroy_revoked_delegation(struct nfs4_delegation *dp)
456{
457 list_del_init(&dp->dl_recall_lru);
458 remove_stid(&dp->dl_stid);
361 nfs4_put_delegation(dp); 459 nfs4_put_delegation(dp);
362} 460}
363 461
462static void destroy_delegation(struct nfs4_delegation *dp)
463{
464 unhash_delegation(dp);
465 remove_stid(&dp->dl_stid);
466 nfs4_put_delegation(dp);
467}
468
469static void revoke_delegation(struct nfs4_delegation *dp)
470{
471 struct nfs4_client *clp = dp->dl_stid.sc_client;
472
473 if (clp->cl_minorversion == 0)
474 destroy_delegation(dp);
475 else {
476 unhash_delegation(dp);
477 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
478 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
479 }
480}
481
364/* 482/*
365 * SETCLIENTID state 483 * SETCLIENTID state
366 */ 484 */
@@ -501,7 +619,8 @@ static void close_generic_stateid(struct nfs4_ol_stateid *stp)
501 619
502static void free_generic_stateid(struct nfs4_ol_stateid *stp) 620static void free_generic_stateid(struct nfs4_ol_stateid *stp)
503{ 621{
504 free_stid(&stp->st_stid, stateid_slab); 622 remove_stid(&stp->st_stid);
623 kmem_cache_free(stateid_slab, stp);
505} 624}
506 625
507static void release_lock_stateid(struct nfs4_ol_stateid *stp) 626static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -617,6 +736,28 @@ dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
617} 736}
618#endif 737#endif
619 738
739/*
740 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
741 * won't be used for replay.
742 */
743void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
744{
745 struct nfs4_stateowner *so = cstate->replay_owner;
746
747 if (nfserr == nfserr_replay_me)
748 return;
749
750 if (!seqid_mutating_err(ntohl(nfserr))) {
751 cstate->replay_owner = NULL;
752 return;
753 }
754 if (!so)
755 return;
756 if (so->so_is_open_owner)
757 release_last_closed_stateid(openowner(so));
758 so->so_seqid++;
759 return;
760}
620 761
621static void 762static void
622gen_sessionid(struct nfsd4_session *ses) 763gen_sessionid(struct nfsd4_session *ses)
@@ -657,17 +798,15 @@ free_session_slots(struct nfsd4_session *ses)
657 * We don't actually need to cache the rpc and session headers, so we 798 * We don't actually need to cache the rpc and session headers, so we
658 * can allocate a little less for each slot: 799 * can allocate a little less for each slot:
659 */ 800 */
660static inline int slot_bytes(struct nfsd4_channel_attrs *ca) 801static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
661{ 802{
662 return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 803 u32 size;
663}
664 804
665static int nfsd4_sanitize_slot_size(u32 size) 805 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
666{ 806 size = 0;
667 size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */ 807 else
668 size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE); 808 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
669 809 return size + sizeof(struct nfsd4_slot);
670 return size;
671} 810}
672 811
673/* 812/*
@@ -675,12 +814,12 @@ static int nfsd4_sanitize_slot_size(u32 size)
675 * re-negotiate active sessions and reduce their slot usage to make 814 * re-negotiate active sessions and reduce their slot usage to make
676 * room for new connections. For now we just fail the create session. 815 * room for new connections. For now we just fail the create session.
677 */ 816 */
678static int nfsd4_get_drc_mem(int slotsize, u32 num) 817static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
679{ 818{
819 u32 slotsize = slot_bytes(ca);
820 u32 num = ca->maxreqs;
680 int avail; 821 int avail;
681 822
682 num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
683
684 spin_lock(&nfsd_drc_lock); 823 spin_lock(&nfsd_drc_lock);
685 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, 824 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
686 nfsd_drc_max_mem - nfsd_drc_mem_used); 825 nfsd_drc_max_mem - nfsd_drc_mem_used);
@@ -691,15 +830,19 @@ static int nfsd4_get_drc_mem(int slotsize, u32 num)
691 return num; 830 return num;
692} 831}
693 832
694static void nfsd4_put_drc_mem(int slotsize, int num) 833static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
695{ 834{
835 int slotsize = slot_bytes(ca);
836
696 spin_lock(&nfsd_drc_lock); 837 spin_lock(&nfsd_drc_lock);
697 nfsd_drc_mem_used -= slotsize * num; 838 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
698 spin_unlock(&nfsd_drc_lock); 839 spin_unlock(&nfsd_drc_lock);
699} 840}
700 841
701static struct nfsd4_session *__alloc_session(int slotsize, int numslots) 842static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
702{ 843{
844 int numslots = attrs->maxreqs;
845 int slotsize = slot_bytes(attrs);
703 struct nfsd4_session *new; 846 struct nfsd4_session *new;
704 int mem, i; 847 int mem, i;
705 848
@@ -712,8 +855,7 @@ static struct nfsd4_session *__alloc_session(int slotsize, int numslots)
712 return NULL; 855 return NULL;
713 /* allocate each struct nfsd4_slot and data cache in one piece */ 856 /* allocate each struct nfsd4_slot and data cache in one piece */
714 for (i = 0; i < numslots; i++) { 857 for (i = 0; i < numslots; i++) {
715 mem = sizeof(struct nfsd4_slot) + slotsize; 858 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
716 new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
717 if (!new->se_slots[i]) 859 if (!new->se_slots[i])
718 goto out_free; 860 goto out_free;
719 } 861 }
@@ -725,21 +867,6 @@ out_free:
725 return NULL; 867 return NULL;
726} 868}
727 869
728static void init_forechannel_attrs(struct nfsd4_channel_attrs *new,
729 struct nfsd4_channel_attrs *req,
730 int numslots, int slotsize,
731 struct nfsd_net *nn)
732{
733 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
734
735 new->maxreqs = numslots;
736 new->maxresp_cached = min_t(u32, req->maxresp_cached,
737 slotsize + NFSD_MIN_HDR_SEQ_SZ);
738 new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
739 new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
740 new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
741}
742
743static void free_conn(struct nfsd4_conn *c) 870static void free_conn(struct nfsd4_conn *c)
744{ 871{
745 svc_xprt_put(c->cn_xprt); 872 svc_xprt_put(c->cn_xprt);
@@ -756,8 +883,8 @@ static void nfsd4_conn_lost(struct svc_xpt_user *u)
756 list_del(&c->cn_persession); 883 list_del(&c->cn_persession);
757 free_conn(c); 884 free_conn(c);
758 } 885 }
759 spin_unlock(&clp->cl_lock);
760 nfsd4_probe_callback(clp); 886 nfsd4_probe_callback(clp);
887 spin_unlock(&clp->cl_lock);
761} 888}
762 889
763static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 890static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
@@ -841,59 +968,20 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
841 968
842static void __free_session(struct nfsd4_session *ses) 969static void __free_session(struct nfsd4_session *ses)
843{ 970{
844 nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs);
845 free_session_slots(ses); 971 free_session_slots(ses);
846 kfree(ses); 972 kfree(ses);
847} 973}
848 974
849static void free_session(struct kref *kref) 975static void free_session(struct nfsd4_session *ses)
850{ 976{
851 struct nfsd4_session *ses; 977 struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
852 struct nfsd_net *nn;
853
854 ses = container_of(kref, struct nfsd4_session, se_ref);
855 nn = net_generic(ses->se_client->net, nfsd_net_id);
856 978
857 lockdep_assert_held(&nn->client_lock); 979 lockdep_assert_held(&nn->client_lock);
858 nfsd4_del_conns(ses); 980 nfsd4_del_conns(ses);
981 nfsd4_put_drc_mem(&ses->se_fchannel);
859 __free_session(ses); 982 __free_session(ses);
860} 983}
861 984
862void nfsd4_put_session(struct nfsd4_session *ses)
863{
864 struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
865
866 spin_lock(&nn->client_lock);
867 nfsd4_put_session_locked(ses);
868 spin_unlock(&nn->client_lock);
869}
870
871static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan,
872 struct nfsd_net *nn)
873{
874 struct nfsd4_session *new;
875 int numslots, slotsize;
876 /*
877 * Note decreasing slot size below client's request may
878 * make it difficult for client to function correctly, whereas
879 * decreasing the number of slots will (just?) affect
880 * performance. When short on memory we therefore prefer to
881 * decrease number of slots instead of their size.
882 */
883 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
884 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
885 if (numslots < 1)
886 return NULL;
887
888 new = __alloc_session(slotsize, numslots);
889 if (!new) {
890 nfsd4_put_drc_mem(slotsize, numslots);
891 return NULL;
892 }
893 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn);
894 return new;
895}
896
897static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) 985static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
898{ 986{
899 int idx; 987 int idx;
@@ -908,7 +996,7 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
908 new->se_flags = cses->flags; 996 new->se_flags = cses->flags;
909 new->se_cb_prog = cses->callback_prog; 997 new->se_cb_prog = cses->callback_prog;
910 new->se_cb_sec = cses->cb_sec; 998 new->se_cb_sec = cses->cb_sec;
911 kref_init(&new->se_ref); 999 atomic_set(&new->se_ref, 0);
912 idx = hash_sessionid(&new->se_sessionid); 1000 idx = hash_sessionid(&new->se_sessionid);
913 spin_lock(&nn->client_lock); 1001 spin_lock(&nn->client_lock);
914 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); 1002 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
@@ -916,7 +1004,8 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
916 list_add(&new->se_perclnt, &clp->cl_sessions); 1004 list_add(&new->se_perclnt, &clp->cl_sessions);
917 spin_unlock(&clp->cl_lock); 1005 spin_unlock(&clp->cl_lock);
918 spin_unlock(&nn->client_lock); 1006 spin_unlock(&nn->client_lock);
919 1007 memcpy(&new->se_fchannel, &cses->fore_channel,
1008 sizeof(struct nfsd4_channel_attrs));
920 if (cses->flags & SESSION4_BACK_CHAN) { 1009 if (cses->flags & SESSION4_BACK_CHAN) {
921 struct sockaddr *sa = svc_addr(rqstp); 1010 struct sockaddr *sa = svc_addr(rqstp);
922 /* 1011 /*
@@ -963,38 +1052,6 @@ unhash_session(struct nfsd4_session *ses)
963 spin_unlock(&ses->se_client->cl_lock); 1052 spin_unlock(&ses->se_client->cl_lock);
964} 1053}
965 1054
966/* must be called under the client_lock */
967static inline void
968renew_client_locked(struct nfs4_client *clp)
969{
970 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
971
972 if (is_client_expired(clp)) {
973 WARN_ON(1);
974 printk("%s: client (clientid %08x/%08x) already expired\n",
975 __func__,
976 clp->cl_clientid.cl_boot,
977 clp->cl_clientid.cl_id);
978 return;
979 }
980
981 dprintk("renewing client (clientid %08x/%08x)\n",
982 clp->cl_clientid.cl_boot,
983 clp->cl_clientid.cl_id);
984 list_move_tail(&clp->cl_lru, &nn->client_lru);
985 clp->cl_time = get_seconds();
986}
987
988static inline void
989renew_client(struct nfs4_client *clp)
990{
991 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
992
993 spin_lock(&nn->client_lock);
994 renew_client_locked(clp);
995 spin_unlock(&nn->client_lock);
996}
997
998/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1055/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
999static int 1056static int
1000STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 1057STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
@@ -1038,7 +1095,8 @@ free_client(struct nfs4_client *clp)
1038 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1095 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1039 se_perclnt); 1096 se_perclnt);
1040 list_del(&ses->se_perclnt); 1097 list_del(&ses->se_perclnt);
1041 nfsd4_put_session_locked(ses); 1098 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1099 free_session(ses);
1042 } 1100 }
1043 free_svc_cred(&clp->cl_cred); 1101 free_svc_cred(&clp->cl_cred);
1044 kfree(clp->cl_name.data); 1102 kfree(clp->cl_name.data);
@@ -1046,29 +1104,12 @@ free_client(struct nfs4_client *clp)
1046 kfree(clp); 1104 kfree(clp);
1047} 1105}
1048 1106
1049void
1050release_session_client(struct nfsd4_session *session)
1051{
1052 struct nfs4_client *clp = session->se_client;
1053 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1054
1055 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
1056 return;
1057 if (is_client_expired(clp)) {
1058 free_client(clp);
1059 session->se_client = NULL;
1060 } else
1061 renew_client_locked(clp);
1062 spin_unlock(&nn->client_lock);
1063}
1064
1065/* must be called under the client_lock */ 1107/* must be called under the client_lock */
1066static inline void 1108static inline void
1067unhash_client_locked(struct nfs4_client *clp) 1109unhash_client_locked(struct nfs4_client *clp)
1068{ 1110{
1069 struct nfsd4_session *ses; 1111 struct nfsd4_session *ses;
1070 1112
1071 mark_client_expired(clp);
1072 list_del(&clp->cl_lru); 1113 list_del(&clp->cl_lru);
1073 spin_lock(&clp->cl_lock); 1114 spin_lock(&clp->cl_lock);
1074 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 1115 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
@@ -1094,7 +1135,7 @@ destroy_client(struct nfs4_client *clp)
1094 spin_unlock(&recall_lock); 1135 spin_unlock(&recall_lock);
1095 while (!list_empty(&reaplist)) { 1136 while (!list_empty(&reaplist)) {
1096 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1137 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1097 unhash_delegation(dp); 1138 destroy_delegation(dp);
1098 } 1139 }
1099 while (!list_empty(&clp->cl_openowners)) { 1140 while (!list_empty(&clp->cl_openowners)) {
1100 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 1141 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
@@ -1110,8 +1151,8 @@ destroy_client(struct nfs4_client *clp)
1110 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 1151 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1111 spin_lock(&nn->client_lock); 1152 spin_lock(&nn->client_lock);
1112 unhash_client_locked(clp); 1153 unhash_client_locked(clp);
1113 if (atomic_read(&clp->cl_refcount) == 0) 1154 WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
1114 free_client(clp); 1155 free_client(clp);
1115 spin_unlock(&nn->client_lock); 1156 spin_unlock(&nn->client_lock);
1116} 1157}
1117 1158
@@ -1290,6 +1331,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
1290 INIT_LIST_HEAD(&clp->cl_delegations); 1331 INIT_LIST_HEAD(&clp->cl_delegations);
1291 INIT_LIST_HEAD(&clp->cl_lru); 1332 INIT_LIST_HEAD(&clp->cl_lru);
1292 INIT_LIST_HEAD(&clp->cl_callbacks); 1333 INIT_LIST_HEAD(&clp->cl_callbacks);
1334 INIT_LIST_HEAD(&clp->cl_revoked);
1293 spin_lock_init(&clp->cl_lock); 1335 spin_lock_init(&clp->cl_lock);
1294 nfsd4_init_callback(&clp->cl_cb_null); 1336 nfsd4_init_callback(&clp->cl_cb_null);
1295 clp->cl_time = get_seconds(); 1337 clp->cl_time = get_seconds();
@@ -1371,12 +1413,12 @@ move_to_confirmed(struct nfs4_client *clp)
1371} 1413}
1372 1414
1373static struct nfs4_client * 1415static struct nfs4_client *
1374find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 1416find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1375{ 1417{
1376 struct nfs4_client *clp; 1418 struct nfs4_client *clp;
1377 unsigned int idhashval = clientid_hashval(clid->cl_id); 1419 unsigned int idhashval = clientid_hashval(clid->cl_id);
1378 1420
1379 list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) { 1421 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
1380 if (same_clid(&clp->cl_clientid, clid)) { 1422 if (same_clid(&clp->cl_clientid, clid)) {
1381 if ((bool)clp->cl_minorversion != sessions) 1423 if ((bool)clp->cl_minorversion != sessions)
1382 return NULL; 1424 return NULL;
@@ -1388,19 +1430,19 @@ find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1388} 1430}
1389 1431
1390static struct nfs4_client * 1432static struct nfs4_client *
1433find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1434{
1435 struct list_head *tbl = nn->conf_id_hashtbl;
1436
1437 return find_client_in_id_table(tbl, clid, sessions);
1438}
1439
1440static struct nfs4_client *
1391find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 1441find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1392{ 1442{
1393 struct nfs4_client *clp; 1443 struct list_head *tbl = nn->unconf_id_hashtbl;
1394 unsigned int idhashval = clientid_hashval(clid->cl_id);
1395 1444
1396 list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) { 1445 return find_client_in_id_table(tbl, clid, sessions);
1397 if (same_clid(&clp->cl_clientid, clid)) {
1398 if ((bool)clp->cl_minorversion != sessions)
1399 return NULL;
1400 return clp;
1401 }
1402 }
1403 return NULL;
1404} 1446}
1405 1447
1406static bool clp_used_exchangeid(struct nfs4_client *clp) 1448static bool clp_used_exchangeid(struct nfs4_client *clp)
@@ -1604,6 +1646,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1604 default: /* checked by xdr code */ 1646 default: /* checked by xdr code */
1605 WARN_ON_ONCE(1); 1647 WARN_ON_ONCE(1);
1606 case SP4_SSV: 1648 case SP4_SSV:
1649 return nfserr_encr_alg_unsupp;
1607 case SP4_MACH_CRED: 1650 case SP4_MACH_CRED:
1608 return nfserr_serverfault; /* no excuse :-/ */ 1651 return nfserr_serverfault; /* no excuse :-/ */
1609 } 1652 }
@@ -1745,10 +1788,55 @@ nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1745 /* seqid, slotID, slotID, slotID, status */ \ 1788 /* seqid, slotID, slotID, slotID, status */ \
1746 5 ) * sizeof(__be32)) 1789 5 ) * sizeof(__be32))
1747 1790
1748static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel) 1791static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1792{
1793 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
1794
1795 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
1796 return nfserr_toosmall;
1797 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
1798 return nfserr_toosmall;
1799 ca->headerpadsz = 0;
1800 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
1801 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
1802 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
1803 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
1804 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
1805 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
1806 /*
1807 * Note decreasing slot size below client's request may make it
1808 * difficult for client to function correctly, whereas
1809 * decreasing the number of slots will (just?) affect
1810 * performance. When short on memory we therefore prefer to
1811 * decrease number of slots instead of their size. Clients that
1812 * request larger slots than they need will get poor results:
1813 */
1814 ca->maxreqs = nfsd4_get_drc_mem(ca);
1815 if (!ca->maxreqs)
1816 return nfserr_jukebox;
1817
1818 return nfs_ok;
1819}
1820
1821static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
1749{ 1822{
1750 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ 1823 ca->headerpadsz = 0;
1751 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ; 1824
1825 /*
1826 * These RPC_MAX_HEADER macros are overkill, especially since we
1827 * don't even do gss on the backchannel yet. But this is still
1828 * less than 1k. Tighten up this estimate in the unlikely event
1829 * it turns out to be a problem for some client:
1830 */
1831 if (ca->maxreq_sz < NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH)
1832 return nfserr_toosmall;
1833 if (ca->maxresp_sz < NFS4_dec_cb_recall_sz + RPC_MAX_REPHEADER_WITH_AUTH)
1834 return nfserr_toosmall;
1835 ca->maxresp_cached = 0;
1836 if (ca->maxops < 2)
1837 return nfserr_toosmall;
1838
1839 return nfs_ok;
1752} 1840}
1753 1841
1754__be32 1842__be32
@@ -1766,12 +1854,16 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1766 1854
1767 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 1855 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
1768 return nfserr_inval; 1856 return nfserr_inval;
1769 if (check_forechannel_attrs(cr_ses->fore_channel)) 1857 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
1770 return nfserr_toosmall; 1858 if (status)
1771 new = alloc_session(&cr_ses->fore_channel, nn); 1859 return status;
1772 if (!new) 1860 status = check_backchannel_attrs(&cr_ses->back_channel);
1773 return nfserr_jukebox; 1861 if (status)
1862 return status;
1774 status = nfserr_jukebox; 1863 status = nfserr_jukebox;
1864 new = alloc_session(&cr_ses->fore_channel);
1865 if (!new)
1866 goto out_release_drc_mem;
1775 conn = alloc_conn_from_crses(rqstp, cr_ses); 1867 conn = alloc_conn_from_crses(rqstp, cr_ses);
1776 if (!conn) 1868 if (!conn)
1777 goto out_free_session; 1869 goto out_free_session;
@@ -1779,6 +1871,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1779 nfs4_lock_state(); 1871 nfs4_lock_state();
1780 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 1872 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
1781 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 1873 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
1874 WARN_ON_ONCE(conf && unconf);
1782 1875
1783 if (conf) { 1876 if (conf) {
1784 cs_slot = &conf->cl_cs_slot; 1877 cs_slot = &conf->cl_cs_slot;
@@ -1805,8 +1898,12 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1805 goto out_free_conn; 1898 goto out_free_conn;
1806 } 1899 }
1807 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 1900 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
1808 if (old) 1901 if (old) {
1902 status = mark_client_expired(old);
1903 if (status)
1904 goto out_free_conn;
1809 expire_client(old); 1905 expire_client(old);
1906 }
1810 move_to_confirmed(unconf); 1907 move_to_confirmed(unconf);
1811 conf = unconf; 1908 conf = unconf;
1812 } else { 1909 } else {
@@ -1825,23 +1922,21 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1825 1922
1826 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 1923 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
1827 NFS4_MAX_SESSIONID_LEN); 1924 NFS4_MAX_SESSIONID_LEN);
1828 memcpy(&cr_ses->fore_channel, &new->se_fchannel,
1829 sizeof(struct nfsd4_channel_attrs));
1830 cs_slot->sl_seqid++; 1925 cs_slot->sl_seqid++;
1831 cr_ses->seqid = cs_slot->sl_seqid; 1926 cr_ses->seqid = cs_slot->sl_seqid;
1832 1927
1833 /* cache solo and embedded create sessions under the state lock */ 1928 /* cache solo and embedded create sessions under the state lock */
1834 nfsd4_cache_create_session(cr_ses, cs_slot, status); 1929 nfsd4_cache_create_session(cr_ses, cs_slot, status);
1835 nfs4_unlock_state(); 1930 nfs4_unlock_state();
1836out:
1837 dprintk("%s returns %d\n", __func__, ntohl(status));
1838 return status; 1931 return status;
1839out_free_conn: 1932out_free_conn:
1840 nfs4_unlock_state(); 1933 nfs4_unlock_state();
1841 free_conn(conn); 1934 free_conn(conn);
1842out_free_session: 1935out_free_session:
1843 __free_session(new); 1936 __free_session(new);
1844 goto out; 1937out_release_drc_mem:
1938 nfsd4_put_drc_mem(&cr_ses->fore_channel);
1939 return status;
1845} 1940}
1846 1941
1847static __be32 nfsd4_map_bcts_dir(u32 *dir) 1942static __be32 nfsd4_map_bcts_dir(u32 *dir)
@@ -1879,30 +1974,30 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
1879{ 1974{
1880 __be32 status; 1975 __be32 status;
1881 struct nfsd4_conn *conn; 1976 struct nfsd4_conn *conn;
1977 struct nfsd4_session *session;
1882 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1978 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1883 1979
1884 if (!nfsd4_last_compound_op(rqstp)) 1980 if (!nfsd4_last_compound_op(rqstp))
1885 return nfserr_not_only_op; 1981 return nfserr_not_only_op;
1982 nfs4_lock_state();
1886 spin_lock(&nn->client_lock); 1983 spin_lock(&nn->client_lock);
1887 cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp)); 1984 session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
1888 /* Sorta weird: we only need the refcnt'ing because new_conn acquires
1889 * client_lock iself: */
1890 if (cstate->session) {
1891 nfsd4_get_session(cstate->session);
1892 atomic_inc(&cstate->session->se_client->cl_refcount);
1893 }
1894 spin_unlock(&nn->client_lock); 1985 spin_unlock(&nn->client_lock);
1895 if (!cstate->session) 1986 status = nfserr_badsession;
1896 return nfserr_badsession; 1987 if (!session)
1897 1988 goto out;
1898 status = nfsd4_map_bcts_dir(&bcts->dir); 1989 status = nfsd4_map_bcts_dir(&bcts->dir);
1899 if (status) 1990 if (status)
1900 return status; 1991 goto out;
1901 conn = alloc_conn(rqstp, bcts->dir); 1992 conn = alloc_conn(rqstp, bcts->dir);
1993 status = nfserr_jukebox;
1902 if (!conn) 1994 if (!conn)
1903 return nfserr_jukebox; 1995 goto out;
1904 nfsd4_init_conn(rqstp, conn, cstate->session); 1996 nfsd4_init_conn(rqstp, conn, session);
1905 return nfs_ok; 1997 status = nfs_ok;
1998out:
1999 nfs4_unlock_state();
2000 return status;
1906} 2001}
1907 2002
1908static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) 2003static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
@@ -1918,42 +2013,36 @@ nfsd4_destroy_session(struct svc_rqst *r,
1918 struct nfsd4_destroy_session *sessionid) 2013 struct nfsd4_destroy_session *sessionid)
1919{ 2014{
1920 struct nfsd4_session *ses; 2015 struct nfsd4_session *ses;
1921 __be32 status = nfserr_badsession; 2016 __be32 status;
1922 struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id); 2017 struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id);
1923 2018
1924 /* Notes: 2019 nfs4_lock_state();
1925 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid 2020 status = nfserr_not_only_op;
1926 * - Should we return nfserr_back_chan_busy if waiting for
1927 * callbacks on to-be-destroyed session?
1928 * - Do we need to clear any callback info from previous session?
1929 */
1930
1931 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { 2021 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
1932 if (!nfsd4_last_compound_op(r)) 2022 if (!nfsd4_last_compound_op(r))
1933 return nfserr_not_only_op; 2023 goto out;
1934 } 2024 }
1935 dump_sessionid(__func__, &sessionid->sessionid); 2025 dump_sessionid(__func__, &sessionid->sessionid);
1936 spin_lock(&nn->client_lock); 2026 spin_lock(&nn->client_lock);
1937 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r)); 2027 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
1938 if (!ses) { 2028 status = nfserr_badsession;
1939 spin_unlock(&nn->client_lock); 2029 if (!ses)
1940 goto out; 2030 goto out_client_lock;
1941 } 2031 status = mark_session_dead_locked(ses);
1942 2032 if (status)
2033 goto out_client_lock;
1943 unhash_session(ses); 2034 unhash_session(ses);
1944 spin_unlock(&nn->client_lock); 2035 spin_unlock(&nn->client_lock);
1945 2036
1946 nfs4_lock_state();
1947 nfsd4_probe_callback_sync(ses->se_client); 2037 nfsd4_probe_callback_sync(ses->se_client);
1948 nfs4_unlock_state();
1949 2038
1950 spin_lock(&nn->client_lock); 2039 spin_lock(&nn->client_lock);
1951 nfsd4_del_conns(ses); 2040 free_session(ses);
1952 nfsd4_put_session_locked(ses);
1953 spin_unlock(&nn->client_lock);
1954 status = nfs_ok; 2041 status = nfs_ok;
2042out_client_lock:
2043 spin_unlock(&nn->client_lock);
1955out: 2044out:
1956 dprintk("%s returns %d\n", __func__, ntohl(status)); 2045 nfs4_unlock_state();
1957 return status; 2046 return status;
1958} 2047}
1959 2048
@@ -2013,6 +2102,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2013{ 2102{
2014 struct nfsd4_compoundres *resp = rqstp->rq_resp; 2103 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2015 struct nfsd4_session *session; 2104 struct nfsd4_session *session;
2105 struct nfs4_client *clp;
2016 struct nfsd4_slot *slot; 2106 struct nfsd4_slot *slot;
2017 struct nfsd4_conn *conn; 2107 struct nfsd4_conn *conn;
2018 __be32 status; 2108 __be32 status;
@@ -2033,19 +2123,26 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2033 status = nfserr_badsession; 2123 status = nfserr_badsession;
2034 session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp)); 2124 session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));
2035 if (!session) 2125 if (!session)
2036 goto out; 2126 goto out_no_session;
2127 clp = session->se_client;
2128 status = get_client_locked(clp);
2129 if (status)
2130 goto out_no_session;
2131 status = nfsd4_get_session_locked(session);
2132 if (status)
2133 goto out_put_client;
2037 2134
2038 status = nfserr_too_many_ops; 2135 status = nfserr_too_many_ops;
2039 if (nfsd4_session_too_many_ops(rqstp, session)) 2136 if (nfsd4_session_too_many_ops(rqstp, session))
2040 goto out; 2137 goto out_put_session;
2041 2138
2042 status = nfserr_req_too_big; 2139 status = nfserr_req_too_big;
2043 if (nfsd4_request_too_big(rqstp, session)) 2140 if (nfsd4_request_too_big(rqstp, session))
2044 goto out; 2141 goto out_put_session;
2045 2142
2046 status = nfserr_badslot; 2143 status = nfserr_badslot;
2047 if (seq->slotid >= session->se_fchannel.maxreqs) 2144 if (seq->slotid >= session->se_fchannel.maxreqs)
2048 goto out; 2145 goto out_put_session;
2049 2146
2050 slot = session->se_slots[seq->slotid]; 2147 slot = session->se_slots[seq->slotid];
2051 dprintk("%s: slotid %d\n", __func__, seq->slotid); 2148 dprintk("%s: slotid %d\n", __func__, seq->slotid);
@@ -2060,7 +2157,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2060 if (status == nfserr_replay_cache) { 2157 if (status == nfserr_replay_cache) {
2061 status = nfserr_seq_misordered; 2158 status = nfserr_seq_misordered;
2062 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 2159 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2063 goto out; 2160 goto out_put_session;
2064 cstate->slot = slot; 2161 cstate->slot = slot;
2065 cstate->session = session; 2162 cstate->session = session;
2066 /* Return the cached reply status and set cstate->status 2163 /* Return the cached reply status and set cstate->status
@@ -2070,7 +2167,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2070 goto out; 2167 goto out;
2071 } 2168 }
2072 if (status) 2169 if (status)
2073 goto out; 2170 goto out_put_session;
2074 2171
2075 nfsd4_sequence_check_conn(conn, session); 2172 nfsd4_sequence_check_conn(conn, session);
2076 conn = NULL; 2173 conn = NULL;
@@ -2087,27 +2184,27 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2087 cstate->session = session; 2184 cstate->session = session;
2088 2185
2089out: 2186out:
2090 /* Hold a session reference until done processing the compound. */ 2187 switch (clp->cl_cb_state) {
2091 if (cstate->session) { 2188 case NFSD4_CB_DOWN:
2092 struct nfs4_client *clp = session->se_client; 2189 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2093 2190 break;
2094 nfsd4_get_session(cstate->session); 2191 case NFSD4_CB_FAULT:
2095 atomic_inc(&clp->cl_refcount); 2192 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2096 switch (clp->cl_cb_state) { 2193 break;
2097 case NFSD4_CB_DOWN: 2194 default:
2098 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 2195 seq->status_flags = 0;
2099 break;
2100 case NFSD4_CB_FAULT:
2101 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2102 break;
2103 default:
2104 seq->status_flags = 0;
2105 }
2106 } 2196 }
2197 if (!list_empty(&clp->cl_revoked))
2198 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2199out_no_session:
2107 kfree(conn); 2200 kfree(conn);
2108 spin_unlock(&nn->client_lock); 2201 spin_unlock(&nn->client_lock);
2109 dprintk("%s: return %d\n", __func__, ntohl(status));
2110 return status; 2202 return status;
2203out_put_session:
2204 nfsd4_put_session(session);
2205out_put_client:
2206 put_client_renew_locked(clp);
2207 goto out_no_session;
2111} 2208}
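
The reworked SEQUENCE path above pins a client reference and a session reference up front and funnels every later failure through out_put_session/out_put_client, so whatever was pinned is dropped exactly once before falling back to out_no_session. A minimal userspace sketch of the same acquire-in-order, unwind-in-reverse goto idiom; the get/put helpers here are hypothetical stand-ins, not nfsd functions.

#include <stdio.h>

/* Hypothetical stand-ins for the client/session references pinned above. */
static int get_client(void)   { puts("get client");   return 0; }
static void put_client(void)  { puts("put client"); }
static int get_session(void)  { puts("get session");  return 0; }
static void put_session(void) { puts("put session"); }

static int do_sequence(int slot_ok)
{
	int status;

	status = get_client();
	if (status)
		goto out_no_session;
	status = get_session();
	if (status)
		goto out_put_client;

	status = slot_ok ? 0 : -1;	/* stands in for the slot/size checks */
	if (status)
		goto out_put_session;	/* error after pinning: unwind both */

	return 0;			/* success: keep both references */

out_put_session:
	put_session();
out_put_client:
	put_client();
out_no_session:
	return status;
}

int main(void)
{
	do_sequence(1);	/* success keeps the references */
	do_sequence(0);	/* failure releases them in reverse order */
	return 0;
}
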
2112 2209
2113__be32 2210__be32
@@ -2120,17 +2217,12 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2120 nfs4_lock_state(); 2217 nfs4_lock_state();
2121 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 2218 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2122 conf = find_confirmed_client(&dc->clientid, true, nn); 2219 conf = find_confirmed_client(&dc->clientid, true, nn);
2220 WARN_ON_ONCE(conf && unconf);
2123 2221
2124 if (conf) { 2222 if (conf) {
2125 clp = conf; 2223 clp = conf;
2126 2224
2127 if (!is_client_expired(conf) && client_has_state(conf)) { 2225 if (client_has_state(conf)) {
2128 status = nfserr_clientid_busy;
2129 goto out;
2130 }
2131
2132 /* rfc5661 18.50.3 */
2133 if (cstate->session && conf == cstate->session->se_client) {
2134 status = nfserr_clientid_busy; 2226 status = nfserr_clientid_busy;
2135 goto out; 2227 goto out;
2136 } 2228 }
@@ -2144,7 +2236,6 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2144 expire_client(clp); 2236 expire_client(clp);
2145out: 2237out:
2146 nfs4_unlock_state(); 2238 nfs4_unlock_state();
2147 dprintk("%s return %d\n", __func__, ntohl(status));
2148 return status; 2239 return status;
2149} 2240}
2150 2241
@@ -2282,8 +2373,12 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2282 expire_client(unconf); 2373 expire_client(unconf);
2283 } else { /* case 3: normal case; new or rebooted client */ 2374 } else { /* case 3: normal case; new or rebooted client */
2284 conf = find_confirmed_client_by_name(&unconf->cl_name, nn); 2375 conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
2285 if (conf) 2376 if (conf) {
2377 status = mark_client_expired(conf);
2378 if (status)
2379 goto out;
2286 expire_client(conf); 2380 expire_client(conf);
2381 }
2287 move_to_confirmed(unconf); 2382 move_to_confirmed(unconf);
2288 nfsd4_probe_callback(unconf); 2383 nfsd4_probe_callback(unconf);
2289 } 2384 }
@@ -2303,7 +2398,6 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2303 unsigned int hashval = file_hashval(ino); 2398 unsigned int hashval = file_hashval(ino);
2304 2399
2305 atomic_set(&fp->fi_ref, 1); 2400 atomic_set(&fp->fi_ref, 1);
2306 INIT_LIST_HEAD(&fp->fi_hash);
2307 INIT_LIST_HEAD(&fp->fi_stateids); 2401 INIT_LIST_HEAD(&fp->fi_stateids);
2308 INIT_LIST_HEAD(&fp->fi_delegations); 2402 INIT_LIST_HEAD(&fp->fi_delegations);
2309 fp->fi_inode = igrab(ino); 2403 fp->fi_inode = igrab(ino);
@@ -2312,7 +2406,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2312 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 2406 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2313 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 2407 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2314 spin_lock(&recall_lock); 2408 spin_lock(&recall_lock);
2315 list_add(&fp->fi_hash, &file_hashtbl[hashval]); 2409 hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
2316 spin_unlock(&recall_lock); 2410 spin_unlock(&recall_lock);
2317} 2411}
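
nfsd4_init_file() above now links the nfs4_file into file_hashtbl with hlist_add_head(), so each bucket head is a single pointer and a zero-filled table is already empty, which is why the INIT_LIST_HEAD loop disappears from nfs4_state_init() further down. A simplified userspace sketch of that style of bucket insert and lookup; it keeps only a forward pointer per entry, unlike the kernel's hlist, which also tracks a back-pointer for unlinking.

#include <stdio.h>

#define NBUCKETS 8

struct node {
	unsigned long key;
	struct node *next;		/* forward pointer only in this sketch */
};

/* Bucket heads are single pointers; static storage starts out all-NULL,
 * so no explicit initialization pass is needed. */
static struct node *table[NBUCKETS];

static void hash_add(struct node *n)
{
	unsigned int b = n->key % NBUCKETS;

	n->next = table[b];
	table[b] = n;
}

static struct node *hash_find(unsigned long key)
{
	struct node *n;

	for (n = table[key % NBUCKETS]; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

int main(void)
{
	struct node a = { .key = 42 };

	hash_add(&a);
	printf("found: %d\n", hash_find(42) != NULL);
	return 0;
}
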
2318 2412
@@ -2498,7 +2592,7 @@ find_file(struct inode *ino)
2498 struct nfs4_file *fp; 2592 struct nfs4_file *fp;
2499 2593
2500 spin_lock(&recall_lock); 2594 spin_lock(&recall_lock);
2501 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 2595 hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2502 if (fp->fi_inode == ino) { 2596 if (fp->fi_inode == ino) {
2503 get_nfs4_file(fp); 2597 get_nfs4_file(fp);
2504 spin_unlock(&recall_lock); 2598 spin_unlock(&recall_lock);
@@ -2521,8 +2615,6 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2521 struct nfs4_ol_stateid *stp; 2615 struct nfs4_ol_stateid *stp;
2522 __be32 ret; 2616 __be32 ret;
2523 2617
2524 dprintk("NFSD: nfs4_share_conflict\n");
2525
2526 fp = find_file(ino); 2618 fp = find_file(ino);
2527 if (!fp) 2619 if (!fp)
2528 return nfs_ok; 2620 return nfs_ok;
@@ -2541,6 +2633,9 @@ out:
2541 2633
2542static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 2634static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2543{ 2635{
2636 struct nfs4_client *clp = dp->dl_stid.sc_client;
2637 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2638
2544 /* We're assuming the state code never drops its reference 2639 /* We're assuming the state code never drops its reference
2545 * without first removing the lease. Since we're in this lease 2640 * without first removing the lease. Since we're in this lease
2546 * callback (and since the lease code is serialized by the kernel 2641 * callback (and since the lease code is serialized by the kernel
@@ -2548,7 +2643,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2548 * it's safe to take a reference: */ 2643 * it's safe to take a reference: */
2549 atomic_inc(&dp->dl_count); 2644 atomic_inc(&dp->dl_count);
2550 2645
2551 list_add_tail(&dp->dl_recall_lru, &del_recall_lru); 2646 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
2552 2647
2553 /* only place dl_time is set. protected by lock_flocks*/ 2648 /* only place dl_time is set. protected by lock_flocks*/
2554 dp->dl_time = get_seconds(); 2649 dp->dl_time = get_seconds();
@@ -2694,7 +2789,7 @@ static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2694} 2789}
2695 2790
2696static __be32 2791static __be32
2697nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open, 2792nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
2698 struct nfs4_delegation **dp) 2793 struct nfs4_delegation **dp)
2699{ 2794{
2700 int flags; 2795 int flags;
@@ -3019,7 +3114,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3019 if (fp) { 3114 if (fp) {
3020 if ((status = nfs4_check_open(fp, open, &stp))) 3115 if ((status = nfs4_check_open(fp, open, &stp)))
3021 goto out; 3116 goto out;
3022 status = nfs4_check_deleg(cl, fp, open, &dp); 3117 status = nfs4_check_deleg(cl, open, &dp);
3023 if (status) 3118 if (status)
3024 goto out; 3119 goto out;
3025 } else { 3120 } else {
@@ -3197,13 +3292,12 @@ nfs4_laundromat(struct nfsd_net *nn)
3197 clientid_val = t; 3292 clientid_val = t;
3198 break; 3293 break;
3199 } 3294 }
3200 if (atomic_read(&clp->cl_refcount)) { 3295 if (mark_client_expired_locked(clp)) {
3201 dprintk("NFSD: client in use (clientid %08x)\n", 3296 dprintk("NFSD: client in use (clientid %08x)\n",
3202 clp->cl_clientid.cl_id); 3297 clp->cl_clientid.cl_id);
3203 continue; 3298 continue;
3204 } 3299 }
3205 unhash_client_locked(clp); 3300 list_move(&clp->cl_lru, &reaplist);
3206 list_add(&clp->cl_lru, &reaplist);
3207 } 3301 }
3208 spin_unlock(&nn->client_lock); 3302 spin_unlock(&nn->client_lock);
3209 list_for_each_safe(pos, next, &reaplist) { 3303 list_for_each_safe(pos, next, &reaplist) {
@@ -3213,7 +3307,7 @@ nfs4_laundromat(struct nfsd_net *nn)
3213 expire_client(clp); 3307 expire_client(clp);
3214 } 3308 }
3215 spin_lock(&recall_lock); 3309 spin_lock(&recall_lock);
3216 list_for_each_safe(pos, next, &del_recall_lru) { 3310 list_for_each_safe(pos, next, &nn->del_recall_lru) {
3217 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3311 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3218 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn) 3312 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
3219 continue; 3313 continue;
@@ -3228,7 +3322,7 @@ nfs4_laundromat(struct nfsd_net *nn)
3228 spin_unlock(&recall_lock); 3322 spin_unlock(&recall_lock);
3229 list_for_each_safe(pos, next, &reaplist) { 3323 list_for_each_safe(pos, next, &reaplist) {
3230 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3324 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3231 unhash_delegation(dp); 3325 revoke_delegation(dp);
3232 } 3326 }
3233 test_val = nn->nfsd4_lease; 3327 test_val = nn->nfsd4_lease;
3234 list_for_each_safe(pos, next, &nn->close_lru) { 3328 list_for_each_safe(pos, next, &nn->close_lru) {
@@ -3271,16 +3365,6 @@ static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *s
3271 return nfs_ok; 3365 return nfs_ok;
3272} 3366}
3273 3367
3274static int
3275STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn)
3276{
3277 if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time)
3278 return 0;
3279 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3280 STATEID_VAL(stateid));
3281 return 1;
3282}
3283
3284static inline int 3368static inline int
3285access_permit_read(struct nfs4_ol_stateid *stp) 3369access_permit_read(struct nfs4_ol_stateid *stp)
3286{ 3370{
@@ -3397,13 +3481,24 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3397 status = check_stateid_generation(stateid, &s->sc_stateid, 1); 3481 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3398 if (status) 3482 if (status)
3399 return status; 3483 return status;
3400 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID))) 3484 switch (s->sc_type) {
3485 case NFS4_DELEG_STID:
3486 return nfs_ok;
3487 case NFS4_REVOKED_DELEG_STID:
3488 return nfserr_deleg_revoked;
3489 case NFS4_OPEN_STID:
3490 case NFS4_LOCK_STID:
3491 ols = openlockstateid(s);
3492 if (ols->st_stateowner->so_is_open_owner
3493 && !(openowner(ols->st_stateowner)->oo_flags
3494 & NFS4_OO_CONFIRMED))
3495 return nfserr_bad_stateid;
3401 return nfs_ok; 3496 return nfs_ok;
3402 ols = openlockstateid(s); 3497 default:
3403 if (ols->st_stateowner->so_is_open_owner 3498 printk("unknown stateid type %x\n", s->sc_type);
3404 && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 3499 case NFS4_CLOSED_STID:
3405 return nfserr_bad_stateid; 3500 return nfserr_bad_stateid;
3406 return nfs_ok; 3501 }
3407} 3502}
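
The switch above sorts stateids by type: plain delegations are good, revoked delegations return nfserr_deleg_revoked so a 4.1 client knows it must FREE_STATEID them, open and lock stateids are rejected until their open owner is confirmed, and closed or unknown types are simply bad. A compact sketch of the same dispatch; the constants and return codes below are invented for illustration, not the NFS4_*_STID values themselves.

#include <stdio.h>

enum stid_type { OPEN = 1, LOCK = 2, DELEG = 4, CLOSED = 8, REVOKED_DELEG = 16 };
enum result    { OK, BAD_STATEID, DELEG_REVOKED };

static enum result validate(enum stid_type t, int owner_confirmed)
{
	switch (t) {
	case DELEG:
		return OK;
	case REVOKED_DELEG:
		return DELEG_REVOKED;	/* caller should free this stateid */
	case OPEN:
	case LOCK:
		return owner_confirmed ? OK : BAD_STATEID;
	case CLOSED:
	default:
		return BAD_STATEID;
	}
}

int main(void)
{
	printf("%d %d %d\n", validate(DELEG, 0), validate(REVOKED_DELEG, 0),
	       validate(OPEN, 1));
	return 0;
}
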
3408 3503
3409static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, 3504static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask,
@@ -3411,19 +3506,20 @@ static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask,
3411 struct nfsd_net *nn) 3506 struct nfsd_net *nn)
3412{ 3507{
3413 struct nfs4_client *cl; 3508 struct nfs4_client *cl;
3509 __be32 status;
3414 3510
3415 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3511 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3416 return nfserr_bad_stateid; 3512 return nfserr_bad_stateid;
3417 if (STALE_STATEID(stateid, nn)) 3513 status = lookup_clientid(&stateid->si_opaque.so_clid, sessions,
3514 nn, &cl);
3515 if (status == nfserr_stale_clientid)
3418 return nfserr_stale_stateid; 3516 return nfserr_stale_stateid;
3419 cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn); 3517 if (status)
3420 if (!cl) 3518 return status;
3421 return nfserr_expired;
3422 *s = find_stateid_by_type(cl, stateid, typemask); 3519 *s = find_stateid_by_type(cl, stateid, typemask);
3423 if (!*s) 3520 if (!*s)
3424 return nfserr_bad_stateid; 3521 return nfserr_bad_stateid;
3425 return nfs_ok; 3522 return nfs_ok;
3426
3427} 3523}
3428 3524
3429/* 3525/*
@@ -3533,6 +3629,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3533{ 3629{
3534 stateid_t *stateid = &free_stateid->fr_stateid; 3630 stateid_t *stateid = &free_stateid->fr_stateid;
3535 struct nfs4_stid *s; 3631 struct nfs4_stid *s;
3632 struct nfs4_delegation *dp;
3536 struct nfs4_client *cl = cstate->session->se_client; 3633 struct nfs4_client *cl = cstate->session->se_client;
3537 __be32 ret = nfserr_bad_stateid; 3634 __be32 ret = nfserr_bad_stateid;
3538 3635
@@ -3554,6 +3651,11 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3554 else 3651 else
3555 ret = nfserr_locks_held; 3652 ret = nfserr_locks_held;
3556 break; 3653 break;
3654 case NFS4_REVOKED_DELEG_STID:
3655 dp = delegstateid(s);
3656 destroy_revoked_delegation(dp);
3657 ret = nfs_ok;
3658 break;
3557 default: 3659 default:
3558 ret = nfserr_bad_stateid; 3660 ret = nfserr_bad_stateid;
3559 } 3661 }
@@ -3578,10 +3680,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
3578 status = nfsd4_check_seqid(cstate, sop, seqid); 3680 status = nfsd4_check_seqid(cstate, sop, seqid);
3579 if (status) 3681 if (status)
3580 return status; 3682 return status;
3581 if (stp->st_stid.sc_type == NFS4_CLOSED_STID) 3683 if (stp->st_stid.sc_type == NFS4_CLOSED_STID
3684 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
3582 /* 3685 /*
3583 * "Closed" stateid's exist *only* to return 3686 * "Closed" stateid's exist *only* to return
3584 * nfserr_replay_me from the previous step. 3687 * nfserr_replay_me from the previous step, and
3688 * revoked delegations are kept only for free_stateid.
3585 */ 3689 */
3586 return nfserr_bad_stateid; 3690 return nfserr_bad_stateid;
3587 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 3691 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
@@ -3611,7 +3715,8 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3611 if (status) 3715 if (status)
3612 return status; 3716 return status;
3613 *stpp = openlockstateid(s); 3717 *stpp = openlockstateid(s);
3614 cstate->replay_owner = (*stpp)->st_stateowner; 3718 if (!nfsd4_has_session(cstate))
3719 cstate->replay_owner = (*stpp)->st_stateowner;
3615 3720
3616 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); 3721 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3617} 3722}
@@ -3669,6 +3774,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3669 nfsd4_client_record_create(oo->oo_owner.so_client); 3774 nfsd4_client_record_create(oo->oo_owner.so_client);
3670 status = nfs_ok; 3775 status = nfs_ok;
3671out: 3776out:
3777 nfsd4_bump_seqid(cstate, status);
3672 if (!cstate->replay_owner) 3778 if (!cstate->replay_owner)
3673 nfs4_unlock_state(); 3779 nfs4_unlock_state();
3674 return status; 3780 return status;
@@ -3752,31 +3858,12 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
3752 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3858 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3753 status = nfs_ok; 3859 status = nfs_ok;
3754out: 3860out:
3861 nfsd4_bump_seqid(cstate, status);
3755 if (!cstate->replay_owner) 3862 if (!cstate->replay_owner)
3756 nfs4_unlock_state(); 3863 nfs4_unlock_state();
3757 return status; 3864 return status;
3758} 3865}
3759 3866
3760void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3761{
3762 struct nfs4_openowner *oo;
3763 struct nfs4_ol_stateid *s;
3764
3765 if (!so->so_is_open_owner)
3766 return;
3767 oo = openowner(so);
3768 s = oo->oo_last_closed_stid;
3769 if (!s)
3770 return;
3771 if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3772 /* Release the last_closed_stid on the next seqid bump: */
3773 oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3774 return;
3775 }
3776 oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3777 release_last_closed_stateid(oo);
3778}
3779
3780static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 3867static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3781{ 3868{
3782 unhash_open_stateid(s); 3869 unhash_open_stateid(s);
@@ -3805,28 +3892,30 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3805 &close->cl_stateid, 3892 &close->cl_stateid,
3806 NFS4_OPEN_STID|NFS4_CLOSED_STID, 3893 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3807 &stp, nn); 3894 &stp, nn);
3895 nfsd4_bump_seqid(cstate, status);
3808 if (status) 3896 if (status)
3809 goto out; 3897 goto out;
3810 oo = openowner(stp->st_stateowner); 3898 oo = openowner(stp->st_stateowner);
3811 status = nfs_ok;
3812 update_stateid(&stp->st_stid.sc_stateid); 3899 update_stateid(&stp->st_stid.sc_stateid);
3813 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3900 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3814 3901
3815 nfsd4_close_open_stateid(stp); 3902 nfsd4_close_open_stateid(stp);
3816 release_last_closed_stateid(oo); 3903
3817 oo->oo_last_closed_stid = stp; 3904 if (cstate->minorversion) {
3905 unhash_stid(&stp->st_stid);
3906 free_generic_stateid(stp);
3907 } else
3908 oo->oo_last_closed_stid = stp;
3818 3909
3819 if (list_empty(&oo->oo_owner.so_stateids)) { 3910 if (list_empty(&oo->oo_owner.so_stateids)) {
3820 if (cstate->minorversion) { 3911 if (cstate->minorversion)
3821 release_openowner(oo); 3912 release_openowner(oo);
3822 cstate->replay_owner = NULL; 3913 else {
3823 } else {
3824 /* 3914 /*
3825 * In the 4.0 case we need to keep the owners around a 3915 * In the 4.0 case we need to keep the owners around a
3826 * little while to handle CLOSE replay. 3916 * little while to handle CLOSE replay.
3827 */ 3917 */
3828 if (list_empty(&oo->oo_owner.so_stateids)) 3918 move_to_close_lru(oo, SVC_NET(rqstp));
3829 move_to_close_lru(oo, SVC_NET(rqstp));
3830 } 3919 }
3831 } 3920 }
3832out: 3921out:
@@ -3858,7 +3947,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3858 if (status) 3947 if (status)
3859 goto out; 3948 goto out;
3860 3949
3861 unhash_delegation(dp); 3950 destroy_delegation(dp);
3862out: 3951out:
3863 nfs4_unlock_state(); 3952 nfs4_unlock_state();
3864 3953
@@ -4236,6 +4325,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4236out: 4325out:
4237 if (status && new_state) 4326 if (status && new_state)
4238 release_lockowner(lock_sop); 4327 release_lockowner(lock_sop);
4328 nfsd4_bump_seqid(cstate, status);
4239 if (!cstate->replay_owner) 4329 if (!cstate->replay_owner)
4240 nfs4_unlock_state(); 4330 nfs4_unlock_state();
4241 if (file_lock) 4331 if (file_lock)
@@ -4345,6 +4435,7 @@ __be32
4345nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4435nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4346 struct nfsd4_locku *locku) 4436 struct nfsd4_locku *locku)
4347{ 4437{
4438 struct nfs4_lockowner *lo;
4348 struct nfs4_ol_stateid *stp; 4439 struct nfs4_ol_stateid *stp;
4349 struct file *filp = NULL; 4440 struct file *filp = NULL;
4350 struct file_lock *file_lock = NULL; 4441 struct file_lock *file_lock = NULL;
@@ -4377,9 +4468,10 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4377 status = nfserr_jukebox; 4468 status = nfserr_jukebox;
4378 goto out; 4469 goto out;
4379 } 4470 }
4471 lo = lockowner(stp->st_stateowner);
4380 locks_init_lock(file_lock); 4472 locks_init_lock(file_lock);
4381 file_lock->fl_type = F_UNLCK; 4473 file_lock->fl_type = F_UNLCK;
4382 file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner); 4474 file_lock->fl_owner = (fl_owner_t)lo;
4383 file_lock->fl_pid = current->tgid; 4475 file_lock->fl_pid = current->tgid;
4384 file_lock->fl_file = filp; 4476 file_lock->fl_file = filp;
4385 file_lock->fl_flags = FL_POSIX; 4477 file_lock->fl_flags = FL_POSIX;
@@ -4390,21 +4482,21 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4390 locku->lu_length); 4482 locku->lu_length);
4391 nfs4_transform_lock_offset(file_lock); 4483 nfs4_transform_lock_offset(file_lock);
4392 4484
4393 /*
4394 * Try to unlock the file in the VFS.
4395 */
4396 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); 4485 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
4397 if (err) { 4486 if (err) {
4398 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 4487 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4399 goto out_nfserr; 4488 goto out_nfserr;
4400 } 4489 }
4401 /*
4402 * OK, unlock succeeded; the only thing left to do is update the stateid.
4403 */
4404 update_stateid(&stp->st_stid.sc_stateid); 4490 update_stateid(&stp->st_stid.sc_stateid);
4405 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4491 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4406 4492
4493 if (nfsd4_has_session(cstate) && !check_for_locks(stp->st_file, lo)) {
4494 WARN_ON_ONCE(cstate->replay_owner);
4495 release_lockowner(lo);
4496 }
4497
4407out: 4498out:
4499 nfsd4_bump_seqid(cstate, status);
4408 if (!cstate->replay_owner) 4500 if (!cstate->replay_owner)
4409 nfs4_unlock_state(); 4501 nfs4_unlock_state();
4410 if (file_lock) 4502 if (file_lock)
@@ -4597,6 +4689,8 @@ nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn)
4597 4689
4598u64 nfsd_forget_client(struct nfs4_client *clp, u64 max) 4690u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
4599{ 4691{
4692 if (mark_client_expired(clp))
4693 return 0;
4600 expire_client(clp); 4694 expire_client(clp);
4601 return 1; 4695 return 1;
4602} 4696}
@@ -4703,7 +4797,7 @@ u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
4703 spin_unlock(&recall_lock); 4797 spin_unlock(&recall_lock);
4704 4798
4705 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) 4799 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
4706 unhash_delegation(dp); 4800 revoke_delegation(dp);
4707 4801
4708 return count; 4802 return count;
4709} 4803}
@@ -4775,12 +4869,6 @@ struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_
4775void 4869void
4776nfs4_state_init(void) 4870nfs4_state_init(void)
4777{ 4871{
4778 int i;
4779
4780 for (i = 0; i < FILE_HASH_SIZE; i++) {
4781 INIT_LIST_HEAD(&file_hashtbl[i]);
4782 }
4783 INIT_LIST_HEAD(&del_recall_lru);
4784} 4872}
4785 4873
4786/* 4874/*
@@ -4844,6 +4932,7 @@ static int nfs4_state_create_net(struct net *net)
4844 nn->unconf_name_tree = RB_ROOT; 4932 nn->unconf_name_tree = RB_ROOT;
4845 INIT_LIST_HEAD(&nn->client_lru); 4933 INIT_LIST_HEAD(&nn->client_lru);
4846 INIT_LIST_HEAD(&nn->close_lru); 4934 INIT_LIST_HEAD(&nn->close_lru);
4935 INIT_LIST_HEAD(&nn->del_recall_lru);
4847 spin_lock_init(&nn->client_lock); 4936 spin_lock_init(&nn->client_lock);
4848 4937
4849 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 4938 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
@@ -4956,16 +5045,14 @@ nfs4_state_shutdown_net(struct net *net)
4956 5045
4957 INIT_LIST_HEAD(&reaplist); 5046 INIT_LIST_HEAD(&reaplist);
4958 spin_lock(&recall_lock); 5047 spin_lock(&recall_lock);
4959 list_for_each_safe(pos, next, &del_recall_lru) { 5048 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4960 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5049 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4961 if (dp->dl_stid.sc_client->net != net)
4962 continue;
4963 list_move(&dp->dl_recall_lru, &reaplist); 5050 list_move(&dp->dl_recall_lru, &reaplist);
4964 } 5051 }
4965 spin_unlock(&recall_lock); 5052 spin_unlock(&recall_lock);
4966 list_for_each_safe(pos, next, &reaplist) { 5053 list_for_each_safe(pos, next, &reaplist) {
4967 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5054 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4968 unhash_delegation(dp); 5055 destroy_delegation(dp);
4969 } 5056 }
4970 5057
4971 nfsd4_client_tracking_exit(net); 5058 nfsd4_client_tracking_exit(net);
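
Both the laundromat and the shutdown path above now walk nn->del_recall_lru, so delegations awaiting recall live on a per-network-namespace list and the old global list, along with its per-entry net filter, can go. A small sketch of the per-namespace container idea with hypothetical structures standing in for nfsd_net and nfs4_delegation.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Each "net" owns its own recall list, mirroring del_recall_lru
 * moving into struct nfsd_net above. */
struct my_net {
	struct list_head del_recall_lru;
};

struct delegation {
	int id;
	struct list_head lru;
};

int main(void)
{
	struct my_net net_a, net_b;
	struct delegation d = { .id = 1 };

	list_init(&net_a.del_recall_lru);
	list_init(&net_b.del_recall_lru);

	list_add_tail(&d.lru, &net_a.del_recall_lru);	/* only touches net_a */
	printf("net_b empty: %d\n", net_b.del_recall_lru.next == &net_b.del_recall_lru);
	return 0;
}
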
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2502951714b1..6cd86e0fe450 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -344,10 +344,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
344 all 32 bits of 'nseconds'. */ 344 all 32 bits of 'nseconds'. */
345 READ_BUF(12); 345 READ_BUF(12);
346 len += 12; 346 len += 12;
347 READ32(dummy32); 347 READ64(iattr->ia_atime.tv_sec);
348 if (dummy32)
349 return nfserr_inval;
350 READ32(iattr->ia_atime.tv_sec);
351 READ32(iattr->ia_atime.tv_nsec); 348 READ32(iattr->ia_atime.tv_nsec);
352 if (iattr->ia_atime.tv_nsec >= (u32)1000000000) 349 if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
353 return nfserr_inval; 350 return nfserr_inval;
@@ -370,10 +367,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
370 all 32 bits of 'nseconds'. */ 367 all 32 bits of 'nseconds'. */
371 READ_BUF(12); 368 READ_BUF(12);
372 len += 12; 369 len += 12;
373 READ32(dummy32); 370 READ64(iattr->ia_mtime.tv_sec);
374 if (dummy32)
375 return nfserr_inval;
376 READ32(iattr->ia_mtime.tv_sec);
377 READ32(iattr->ia_mtime.tv_nsec); 371 READ32(iattr->ia_mtime.tv_nsec);
378 if (iattr->ia_mtime.tv_nsec >= (u32)1000000000) 372 if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
379 return nfserr_inval; 373 return nfserr_inval;
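
The two SETATTR hunks above switch the time-of-access and time-of-modify decode to READ64() of the seconds field, matching the 12-byte nfstime4 wire format of a signed 64-bit seconds value followed by a 32-bit nanoseconds value, instead of rejecting anything with a non-zero high word. A hedged userspace sketch of decoding that layout from a big-endian buffer; the helper name is made up.

#include <stdio.h>
#include <stdint.h>

/* Decode an nfstime4: 8 bytes of big-endian seconds, then 4 bytes of nanoseconds. */
static int decode_nfstime4(const uint8_t *p, int64_t *sec, uint32_t *nsec)
{
	uint64_t s = 0;
	uint32_t ns = 0;
	int i;

	for (i = 0; i < 8; i++)
		s = (s << 8) | p[i];
	for (i = 8; i < 12; i++)
		ns = (ns << 8) | p[i];

	if (ns >= 1000000000u)		/* same nanoseconds sanity check as above */
		return -1;

	*sec = (int64_t)s;
	*nsec = ns;
	return 0;
}

int main(void)
{
	/* 2^32 + 1 seconds, 5 ns: the old 32-bit decode would have rejected this */
	const uint8_t buf[12] = { 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 5 };
	int64_t sec;
	uint32_t nsec;

	if (decode_nfstime4(buf, &sec, &nsec) == 0)
		printf("%lld.%09u\n", (long long)sec, nsec);
	return 0;
}
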
@@ -804,6 +798,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
804 open->op_iattr.ia_valid = 0; 798 open->op_iattr.ia_valid = 0;
805 open->op_openowner = NULL; 799 open->op_openowner = NULL;
806 800
801 open->op_xdr_error = 0;
807 /* seqid, share_access, share_deny, clientid, ownerlen */ 802 /* seqid, share_access, share_deny, clientid, ownerlen */
808 READ_BUF(4); 803 READ_BUF(4);
809 READ32(open->op_seqid); 804 READ32(open->op_seqid);
@@ -1692,36 +1687,6 @@ static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
1692} while (0) 1687} while (0)
1693#define ADJUST_ARGS() resp->p = p 1688#define ADJUST_ARGS() resp->p = p
1694 1689
1695/*
1696 * Header routine to setup seqid operation replay cache
1697 */
1698#define ENCODE_SEQID_OP_HEAD \
1699 __be32 *save; \
1700 \
1701 save = resp->p;
1702
1703/*
1704 * Routine for encoding the result of a "seqid-mutating" NFSv4 operation. This
1705 * is where sequence id's are incremented, and the replay cache is filled.
1706 * Note that we increment sequence id's here, at the last moment, so we're sure
1707 * we know whether the error to be returned is a sequence id mutating error.
1708 */
1709
1710static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, __be32 nfserr)
1711{
1712 struct nfs4_stateowner *stateowner = resp->cstate.replay_owner;
1713
1714 if (seqid_mutating_err(ntohl(nfserr)) && stateowner) {
1715 stateowner->so_seqid++;
1716 stateowner->so_replay.rp_status = nfserr;
1717 stateowner->so_replay.rp_buflen =
1718 (char *)resp->p - (char *)save;
1719 memcpy(stateowner->so_replay.rp_buf, save,
1720 stateowner->so_replay.rp_buflen);
1721 nfsd4_purge_closed_stateid(stateowner);
1722 }
1723}
1724
1725/* Encode as an array of strings the string given with components 1690/* Encode as an array of strings the string given with components
1726 * separated @sep, escaped with esc_enter and esc_exit. 1691 * separated @sep, escaped with esc_enter and esc_exit.
1727 */ 1692 */
@@ -2401,8 +2366,7 @@ out_acl:
2401 if (bmval1 & FATTR4_WORD1_TIME_ACCESS) { 2366 if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
2402 if ((buflen -= 12) < 0) 2367 if ((buflen -= 12) < 0)
2403 goto out_resource; 2368 goto out_resource;
2404 WRITE32(0); 2369 WRITE64((s64)stat.atime.tv_sec);
2405 WRITE32(stat.atime.tv_sec);
2406 WRITE32(stat.atime.tv_nsec); 2370 WRITE32(stat.atime.tv_nsec);
2407 } 2371 }
2408 if (bmval1 & FATTR4_WORD1_TIME_DELTA) { 2372 if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
@@ -2415,15 +2379,13 @@ out_acl:
2415 if (bmval1 & FATTR4_WORD1_TIME_METADATA) { 2379 if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
2416 if ((buflen -= 12) < 0) 2380 if ((buflen -= 12) < 0)
2417 goto out_resource; 2381 goto out_resource;
2418 WRITE32(0); 2382 WRITE64((s64)stat.ctime.tv_sec);
2419 WRITE32(stat.ctime.tv_sec);
2420 WRITE32(stat.ctime.tv_nsec); 2383 WRITE32(stat.ctime.tv_nsec);
2421 } 2384 }
2422 if (bmval1 & FATTR4_WORD1_TIME_MODIFY) { 2385 if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
2423 if ((buflen -= 12) < 0) 2386 if ((buflen -= 12) < 0)
2424 goto out_resource; 2387 goto out_resource;
2425 WRITE32(0); 2388 WRITE64((s64)stat.mtime.tv_sec);
2426 WRITE32(stat.mtime.tv_sec);
2427 WRITE32(stat.mtime.tv_nsec); 2389 WRITE32(stat.mtime.tv_nsec);
2428 } 2390 }
2429 if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) { 2391 if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
@@ -2661,12 +2623,9 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
2661static __be32 2623static __be32
2662nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close) 2624nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
2663{ 2625{
2664 ENCODE_SEQID_OP_HEAD;
2665
2666 if (!nfserr) 2626 if (!nfserr)
2667 nfsd4_encode_stateid(resp, &close->cl_stateid); 2627 nfsd4_encode_stateid(resp, &close->cl_stateid);
2668 2628
2669 encode_seqid_op_tail(resp, save, nfserr);
2670 return nfserr; 2629 return nfserr;
2671} 2630}
2672 2631
@@ -2762,14 +2721,11 @@ nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denie
2762static __be32 2721static __be32
2763nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock) 2722nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
2764{ 2723{
2765 ENCODE_SEQID_OP_HEAD;
2766
2767 if (!nfserr) 2724 if (!nfserr)
2768 nfsd4_encode_stateid(resp, &lock->lk_resp_stateid); 2725 nfsd4_encode_stateid(resp, &lock->lk_resp_stateid);
2769 else if (nfserr == nfserr_denied) 2726 else if (nfserr == nfserr_denied)
2770 nfsd4_encode_lock_denied(resp, &lock->lk_denied); 2727 nfsd4_encode_lock_denied(resp, &lock->lk_denied);
2771 2728
2772 encode_seqid_op_tail(resp, save, nfserr);
2773 return nfserr; 2729 return nfserr;
2774} 2730}
2775 2731
@@ -2784,12 +2740,9 @@ nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
2784static __be32 2740static __be32
2785nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku) 2741nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
2786{ 2742{
2787 ENCODE_SEQID_OP_HEAD;
2788
2789 if (!nfserr) 2743 if (!nfserr)
2790 nfsd4_encode_stateid(resp, &locku->lu_stateid); 2744 nfsd4_encode_stateid(resp, &locku->lu_stateid);
2791 2745
2792 encode_seqid_op_tail(resp, save, nfserr);
2793 return nfserr; 2746 return nfserr;
2794} 2747}
2795 2748
@@ -2812,7 +2765,6 @@ static __be32
2812nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open) 2765nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
2813{ 2766{
2814 __be32 *p; 2767 __be32 *p;
2815 ENCODE_SEQID_OP_HEAD;
2816 2768
2817 if (nfserr) 2769 if (nfserr)
2818 goto out; 2770 goto out;
@@ -2884,31 +2836,24 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
2884 } 2836 }
2885 /* XXX save filehandle here */ 2837 /* XXX save filehandle here */
2886out: 2838out:
2887 encode_seqid_op_tail(resp, save, nfserr);
2888 return nfserr; 2839 return nfserr;
2889} 2840}
2890 2841
2891static __be32 2842static __be32
2892nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc) 2843nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
2893{ 2844{
2894 ENCODE_SEQID_OP_HEAD;
2895
2896 if (!nfserr) 2845 if (!nfserr)
2897 nfsd4_encode_stateid(resp, &oc->oc_resp_stateid); 2846 nfsd4_encode_stateid(resp, &oc->oc_resp_stateid);
2898 2847
2899 encode_seqid_op_tail(resp, save, nfserr);
2900 return nfserr; 2848 return nfserr;
2901} 2849}
2902 2850
2903static __be32 2851static __be32
2904nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od) 2852nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
2905{ 2853{
2906 ENCODE_SEQID_OP_HEAD;
2907
2908 if (!nfserr) 2854 if (!nfserr)
2909 nfsd4_encode_stateid(resp, &od->od_stateid); 2855 nfsd4_encode_stateid(resp, &od->od_stateid);
2910 2856
2911 encode_seqid_op_tail(resp, save, nfserr);
2912 return nfserr; 2857 return nfserr;
2913} 2858}
2914 2859
@@ -3140,10 +3085,11 @@ static __be32
3140nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, 3085nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
3141 __be32 nfserr, struct svc_export *exp) 3086 __be32 nfserr, struct svc_export *exp)
3142{ 3087{
3143 u32 i, nflavs; 3088 u32 i, nflavs, supported;
3144 struct exp_flavor_info *flavs; 3089 struct exp_flavor_info *flavs;
3145 struct exp_flavor_info def_flavs[2]; 3090 struct exp_flavor_info def_flavs[2];
3146 __be32 *p; 3091 __be32 *p, *flavorsp;
3092 static bool report = true;
3147 3093
3148 if (nfserr) 3094 if (nfserr)
3149 goto out; 3095 goto out;
@@ -3167,33 +3113,40 @@ nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
3167 } 3113 }
3168 } 3114 }
3169 3115
3116 supported = 0;
3170 RESERVE_SPACE(4); 3117 RESERVE_SPACE(4);
3171 WRITE32(nflavs); 3118 flavorsp = p++; /* to be backfilled later */
3172 ADJUST_ARGS(); 3119 ADJUST_ARGS();
3120
3173 for (i = 0; i < nflavs; i++) { 3121 for (i = 0; i < nflavs; i++) {
3122 rpc_authflavor_t pf = flavs[i].pseudoflavor;
3174 struct rpcsec_gss_info info; 3123 struct rpcsec_gss_info info;
3175 3124
3176 if (rpcauth_get_gssinfo(flavs[i].pseudoflavor, &info) == 0) { 3125 if (rpcauth_get_gssinfo(pf, &info) == 0) {
3177 RESERVE_SPACE(4); 3126 supported++;
3127 RESERVE_SPACE(4 + 4 + info.oid.len + 4 + 4);
3178 WRITE32(RPC_AUTH_GSS); 3128 WRITE32(RPC_AUTH_GSS);
3179 ADJUST_ARGS();
3180 RESERVE_SPACE(4 + info.oid.len);
3181 WRITE32(info.oid.len); 3129 WRITE32(info.oid.len);
3182 WRITEMEM(info.oid.data, info.oid.len); 3130 WRITEMEM(info.oid.data, info.oid.len);
3183 ADJUST_ARGS();
3184 RESERVE_SPACE(4);
3185 WRITE32(info.qop); 3131 WRITE32(info.qop);
3186 ADJUST_ARGS();
3187 RESERVE_SPACE(4);
3188 WRITE32(info.service); 3132 WRITE32(info.service);
3189 ADJUST_ARGS(); 3133 ADJUST_ARGS();
3190 } else { 3134 } else if (pf < RPC_AUTH_MAXFLAVOR) {
3135 supported++;
3191 RESERVE_SPACE(4); 3136 RESERVE_SPACE(4);
3192 WRITE32(flavs[i].pseudoflavor); 3137 WRITE32(pf);
3193 ADJUST_ARGS(); 3138 ADJUST_ARGS();
3139 } else {
3140 if (report)
3141 pr_warn("NFS: SECINFO: security flavor %u "
3142 "is not supported\n", pf);
3194 } 3143 }
3195 } 3144 }
3196 3145
3146 if (nflavs != supported)
3147 report = false;
3148 *flavorsp = htonl(supported);
3149
3197out: 3150out:
3198 if (exp) 3151 if (exp)
3199 exp_put(exp); 3152 exp_put(exp);
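
The SECINFO encoder above no longer writes the flavor count first: it reserves four bytes, emits only the flavors it can actually describe (GSS triples, or plain pseudoflavors below RPC_AUTH_MAXFLAVOR), then backfills the count with the number it emitted, warning once about anything skipped. A small sketch of that reserve-then-backfill pattern on a plain buffer; the flavor numbers and the 100000 cutoff are invented for the example.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint8_t buf[64];
	uint8_t *p = buf;
	uint8_t *countp;
	uint32_t flavors[] = { 1, 6, 390003 };	/* made-up pseudoflavor numbers */
	uint32_t supported = 0, be;
	size_t i;

	countp = p;		/* remember where the count belongs */
	p += 4;			/* ...and skip it for now */

	for (i = 0; i < sizeof(flavors) / sizeof(flavors[0]); i++) {
		if (flavors[i] >= 100000)	/* pretend this one is unsupported */
			continue;
		be = htonl(flavors[i]);
		memcpy(p, &be, 4);
		p += 4;
		supported++;
	}

	be = htonl(supported);	/* backfill the real count */
	memcpy(countp, &be, 4);

	printf("encoded %u flavors in %zu bytes\n", supported, (size_t)(p - buf));
	return 0;
}
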
@@ -3564,6 +3517,7 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
3564void 3517void
3565nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) 3518nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
3566{ 3519{
3520 struct nfs4_stateowner *so = resp->cstate.replay_owner;
3567 __be32 *statp; 3521 __be32 *statp;
3568 __be32 *p; 3522 __be32 *p;
3569 3523
@@ -3580,6 +3534,11 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
3580 /* nfsd4_check_drc_limit guarantees enough room for error status */ 3534 /* nfsd4_check_drc_limit guarantees enough room for error status */
3581 if (!op->status) 3535 if (!op->status)
3582 op->status = nfsd4_check_resp_size(resp, 0); 3536 op->status = nfsd4_check_resp_size(resp, 0);
3537 if (so) {
3538 so->so_replay.rp_status = op->status;
3539 so->so_replay.rp_buflen = (char *)resp->p - (char *)(statp+1);
3540 memcpy(so->so_replay.rp_buf, statp+1, so->so_replay.rp_buflen);
3541 }
3583status: 3542status:
3584 /* 3543 /*
3585 * Note: We write the status directly, instead of using WRITE32(), 3544 * Note: We write the status directly, instead of using WRITE32(),
@@ -3681,7 +3640,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
3681 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 3640 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3682 } 3641 }
3683 /* Renew the clientid on success and on replay */ 3642 /* Renew the clientid on success and on replay */
3684 release_session_client(cs->session); 3643 put_client_renew(cs->session->se_client);
3685 nfsd4_put_session(cs->session); 3644 nfsd4_put_session(cs->session);
3686 } 3645 }
3687 return 1; 3646 return 1;
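
With ENCODE_SEQID_OP_HEAD and encode_seqid_op_tail() removed from the individual encoders, nfsd4_encode_operation() above copies everything encoded after the op header into the state owner's replay buffer whenever cstate.replay_owner is set, so seqid-mutating replies are cached in one place. A sketch of that caching step over a plain byte buffer; the replay structure is hypothetical.

#include <stdio.h>
#include <string.h>

#define REPLAY_MAX 128

/* Hypothetical replay cache entry, loosely modelled on the owner's replay buffer. */
struct replay {
	int status;
	size_t len;
	unsigned char buf[REPLAY_MAX];
};

/* Cache the bytes encoded after the op header so a retransmission can be
 * answered verbatim without re-running the operation. */
static void cache_reply(struct replay *rp, int status,
			const unsigned char *start, const unsigned char *end)
{
	rp->status = status;
	rp->len = (size_t)(end - start);
	if (rp->len > REPLAY_MAX)
		rp->len = REPLAY_MAX;
	memcpy(rp->buf, start, rp->len);
}

int main(void)
{
	unsigned char resp[32] = "stateid-bytes";
	struct replay rp;

	cache_reply(&rp, 0, resp, resp + 13);
	printf("cached %zu bytes, status %d\n", rp.len, rp.status);
	return 0;
}
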
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index ca05f6dc3544..e76244edd748 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -11,6 +11,8 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/sunrpc/addr.h> 12#include <linux/sunrpc/addr.h>
13#include <linux/highmem.h> 13#include <linux/highmem.h>
14#include <linux/log2.h>
15#include <linux/hash.h>
14#include <net/checksum.h> 16#include <net/checksum.h>
15 17
16#include "nfsd.h" 18#include "nfsd.h"
@@ -18,30 +20,49 @@
18 20
19#define NFSDDBG_FACILITY NFSDDBG_REPCACHE 21#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
20 22
21#define HASHSIZE 64 23/*
24 * We use this value to determine the number of hash buckets from the max
25 * cache size, the idea being that when the cache is at its maximum number
26 * of entries, then this should be the average number of entries per bucket.
27 */
28#define TARGET_BUCKET_SIZE 64
22 29
23static struct hlist_head * cache_hash; 30static struct hlist_head * cache_hash;
24static struct list_head lru_head; 31static struct list_head lru_head;
25static struct kmem_cache *drc_slab; 32static struct kmem_cache *drc_slab;
26static unsigned int num_drc_entries; 33
34/* max number of entries allowed in the cache */
27static unsigned int max_drc_entries; 35static unsigned int max_drc_entries;
28 36
37/* number of significant bits in the hash value */
38static unsigned int maskbits;
39
29/* 40/*
30 * Calculate the hash index from an XID. 41 * Stats and other tracking of the duplicate reply cache. All of these and
42 * the "rc" fields in nfsdstats are protected by the cache_lock
31 */ 43 */
32static inline u32 request_hash(u32 xid) 44
33{ 45/* total number of entries */
34 u32 h = xid; 46static unsigned int num_drc_entries;
35 h ^= (xid >> 24); 47
36 return h & (HASHSIZE-1); 48/* cache misses due only to checksum comparison failures */
37} 49static unsigned int payload_misses;
50
51/* amount of memory (in bytes) currently consumed by the DRC */
52static unsigned int drc_mem_usage;
53
54/* longest hash chain seen */
55static unsigned int longest_chain;
56
57/* size of cache when we saw the longest hash chain */
58static unsigned int longest_chain_cachesize;
38 59
39static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); 60static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
40static void cache_cleaner_func(struct work_struct *unused); 61static void cache_cleaner_func(struct work_struct *unused);
41static int nfsd_reply_cache_shrink(struct shrinker *shrink, 62static int nfsd_reply_cache_shrink(struct shrinker *shrink,
42 struct shrink_control *sc); 63 struct shrink_control *sc);
43 64
44struct shrinker nfsd_reply_cache_shrinker = { 65static struct shrinker nfsd_reply_cache_shrinker = {
45 .shrink = nfsd_reply_cache_shrink, 66 .shrink = nfsd_reply_cache_shrink,
46 .seeks = 1, 67 .seeks = 1,
47}; 68};
@@ -82,6 +103,16 @@ nfsd_cache_size_limit(void)
82 return min_t(unsigned int, limit, 256*1024); 103 return min_t(unsigned int, limit, 256*1024);
83} 104}
84 105
106/*
107 * Compute the number of hash buckets we need. Divide the max cachesize by
108 * the "target" max bucket size, and round up to next power of two.
109 */
110static unsigned int
111nfsd_hashsize(unsigned int limit)
112{
113 return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
114}
115
85static struct svc_cacherep * 116static struct svc_cacherep *
86nfsd_reply_cache_alloc(void) 117nfsd_reply_cache_alloc(void)
87{ 118{
@@ -100,12 +131,15 @@ nfsd_reply_cache_alloc(void)
100static void 131static void
101nfsd_reply_cache_free_locked(struct svc_cacherep *rp) 132nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
102{ 133{
103 if (rp->c_type == RC_REPLBUFF) 134 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
135 drc_mem_usage -= rp->c_replvec.iov_len;
104 kfree(rp->c_replvec.iov_base); 136 kfree(rp->c_replvec.iov_base);
137 }
105 if (!hlist_unhashed(&rp->c_hash)) 138 if (!hlist_unhashed(&rp->c_hash))
106 hlist_del(&rp->c_hash); 139 hlist_del(&rp->c_hash);
107 list_del(&rp->c_lru); 140 list_del(&rp->c_lru);
108 --num_drc_entries; 141 --num_drc_entries;
142 drc_mem_usage -= sizeof(*rp);
109 kmem_cache_free(drc_slab, rp); 143 kmem_cache_free(drc_slab, rp);
110} 144}
111 145
@@ -119,9 +153,13 @@ nfsd_reply_cache_free(struct svc_cacherep *rp)
119 153
120int nfsd_reply_cache_init(void) 154int nfsd_reply_cache_init(void)
121{ 155{
156 unsigned int hashsize;
157
122 INIT_LIST_HEAD(&lru_head); 158 INIT_LIST_HEAD(&lru_head);
123 max_drc_entries = nfsd_cache_size_limit(); 159 max_drc_entries = nfsd_cache_size_limit();
124 num_drc_entries = 0; 160 num_drc_entries = 0;
161 hashsize = nfsd_hashsize(max_drc_entries);
162 maskbits = ilog2(hashsize);
125 163
126 register_shrinker(&nfsd_reply_cache_shrinker); 164 register_shrinker(&nfsd_reply_cache_shrinker);
127 drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), 165 drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
@@ -129,7 +167,7 @@ int nfsd_reply_cache_init(void)
129 if (!drc_slab) 167 if (!drc_slab)
130 goto out_nomem; 168 goto out_nomem;
131 169
132 cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL); 170 cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
133 if (!cache_hash) 171 if (!cache_hash)
134 goto out_nomem; 172 goto out_nomem;
135 173
@@ -180,7 +218,7 @@ static void
180hash_refile(struct svc_cacherep *rp) 218hash_refile(struct svc_cacherep *rp)
181{ 219{
182 hlist_del_init(&rp->c_hash); 220 hlist_del_init(&rp->c_hash);
183 hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid)); 221 hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
184} 222}
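
hash_refile() above now indexes the bucket array with hash_32() over maskbits significant bits, and the array itself is sized by nfsd_hashsize(), which divides the entry limit by TARGET_BUCKET_SIZE and rounds up to a power of two. A userspace sketch of the same sizing and hashing arithmetic; the multiplier is the usual 32-bit golden-ratio constant, written out here rather than taken from the kernel headers.

#include <stdio.h>
#include <stdint.h>

#define TARGET_BUCKET_SIZE 64

/* Round up to the next power of two (v must be nonzero). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

/* Multiplicative hash: keep the top 'bits' bits of x times the golden ratio. */
static uint32_t hash32(uint32_t x, unsigned int bits)
{
	return (x * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
	unsigned int limit = 10000;	/* say, the computed max cache entries */
	unsigned int buckets = roundup_pow2(limit / TARGET_BUCKET_SIZE);
	unsigned int maskbits = 0;

	while ((1u << maskbits) < buckets)
		maskbits++;

	printf("buckets=%u maskbits=%u sample index=%u\n",
	       buckets, maskbits, (unsigned int)hash32(0xdeadbeef, maskbits));
	return 0;
}
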
185 223
186static inline bool 224static inline bool
@@ -273,6 +311,26 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
273 return csum; 311 return csum;
274} 312}
275 313
314static bool
315nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
316{
317 /* Check RPC header info first */
318 if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
319 rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
320 rqstp->rq_arg.len != rp->c_len ||
321 !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
322 rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
323 return false;
324
325 /* compare checksum of NFS data */
326 if (csum != rp->c_csum) {
327 ++payload_misses;
328 return false;
329 }
330
331 return true;
332}
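
nfsd_cache_match() above compares the cheap RPC header fields first (XID, procedure, protocol, version, argument length, source address and port) and only then the payload checksum, counting checksum-only mismatches in payload_misses so XID reuse with different arguments shows up in the stats. A reduced sketch of that ordering with a hypothetical, much smaller request structure.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct req {
	uint32_t xid, proc, vers;
	uint32_t csum;
};

static unsigned int payload_misses;

static bool cache_match(const struct req *rq, const struct req *entry)
{
	/* cheap header comparisons first */
	if (rq->xid != entry->xid || rq->proc != entry->proc ||
	    rq->vers != entry->vers)
		return false;

	/* header matched but the payload differs: count it separately */
	if (rq->csum != entry->csum) {
		++payload_misses;
		return false;
	}
	return true;
}

int main(void)
{
	struct req a = { 1, 4, 3, 0xabcd }, b = { 1, 4, 3, 0x1234 };

	printf("match=%d payload_misses=%u\n", cache_match(&a, &b), payload_misses);
	return 0;
}
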
333
276/* 334/*
277 * Search the request hash for an entry that matches the given rqstp. 335 * Search the request hash for an entry that matches the given rqstp.
278 * Must be called with cache_lock held. Returns the found entry or 336 * Must be called with cache_lock held. Returns the found entry or
@@ -281,23 +339,30 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
281static struct svc_cacherep * 339static struct svc_cacherep *
282nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum) 340nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
283{ 341{
284 struct svc_cacherep *rp; 342 struct svc_cacherep *rp, *ret = NULL;
285 struct hlist_head *rh; 343 struct hlist_head *rh;
286 __be32 xid = rqstp->rq_xid; 344 unsigned int entries = 0;
287 u32 proto = rqstp->rq_prot,
288 vers = rqstp->rq_vers,
289 proc = rqstp->rq_proc;
290 345
291 rh = &cache_hash[request_hash(xid)]; 346 rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
292 hlist_for_each_entry(rp, rh, c_hash) { 347 hlist_for_each_entry(rp, rh, c_hash) {
293 if (xid == rp->c_xid && proc == rp->c_proc && 348 ++entries;
294 proto == rp->c_prot && vers == rp->c_vers && 349 if (nfsd_cache_match(rqstp, csum, rp)) {
295 rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum && 350 ret = rp;
296 rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) && 351 break;
297 rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr)) 352 }
298 return rp;
299 } 353 }
300 return NULL; 354
355 /* tally hash chain length stats */
356 if (entries > longest_chain) {
357 longest_chain = entries;
358 longest_chain_cachesize = num_drc_entries;
359 } else if (entries == longest_chain) {
360 /* prefer to keep the smallest cachesize possible here */
361 longest_chain_cachesize = min(longest_chain_cachesize,
362 num_drc_entries);
363 }
364
365 return ret;
301} 366}
302 367
303/* 368/*
@@ -318,55 +383,55 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
318 __wsum csum; 383 __wsum csum;
319 unsigned long age; 384 unsigned long age;
320 int type = rqstp->rq_cachetype; 385 int type = rqstp->rq_cachetype;
321 int rtn; 386 int rtn = RC_DOIT;
322 387
323 rqstp->rq_cacherep = NULL; 388 rqstp->rq_cacherep = NULL;
324 if (type == RC_NOCACHE) { 389 if (type == RC_NOCACHE) {
325 nfsdstats.rcnocache++; 390 nfsdstats.rcnocache++;
326 return RC_DOIT; 391 return rtn;
327 } 392 }
328 393
329 csum = nfsd_cache_csum(rqstp); 394 csum = nfsd_cache_csum(rqstp);
330 395
396 /*
397 * Since the common case is a cache miss followed by an insert,
398 * preallocate an entry. First, try to reuse the first entry on the LRU
399 * if it works, then go ahead and prune the LRU list.
400 */
331 spin_lock(&cache_lock); 401 spin_lock(&cache_lock);
332 rtn = RC_DOIT;
333
334 rp = nfsd_cache_search(rqstp, csum);
335 if (rp)
336 goto found_entry;
337
338 /* Try to use the first entry on the LRU */
339 if (!list_empty(&lru_head)) { 402 if (!list_empty(&lru_head)) {
340 rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); 403 rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
341 if (nfsd_cache_entry_expired(rp) || 404 if (nfsd_cache_entry_expired(rp) ||
342 num_drc_entries >= max_drc_entries) { 405 num_drc_entries >= max_drc_entries) {
343 lru_put_end(rp); 406 lru_put_end(rp);
344 prune_cache_entries(); 407 prune_cache_entries();
345 goto setup_entry; 408 goto search_cache;
346 } 409 }
347 } 410 }
348 411
349 /* Drop the lock and allocate a new entry */ 412 /* No expired ones available, allocate a new one. */
350 spin_unlock(&cache_lock); 413 spin_unlock(&cache_lock);
351 rp = nfsd_reply_cache_alloc(); 414 rp = nfsd_reply_cache_alloc();
352 if (!rp) {
353 dprintk("nfsd: unable to allocate DRC entry!\n");
354 return RC_DOIT;
355 }
356 spin_lock(&cache_lock); 415 spin_lock(&cache_lock);
357 ++num_drc_entries; 416 if (likely(rp)) {
417 ++num_drc_entries;
418 drc_mem_usage += sizeof(*rp);
419 }
358 420
359 /* 421search_cache:
360 * Must search again just in case someone inserted one
361 * after we dropped the lock above.
362 */
363 found = nfsd_cache_search(rqstp, csum); 422 found = nfsd_cache_search(rqstp, csum);
364 if (found) { 423 if (found) {
365 nfsd_reply_cache_free_locked(rp); 424 if (likely(rp))
425 nfsd_reply_cache_free_locked(rp);
366 rp = found; 426 rp = found;
367 goto found_entry; 427 goto found_entry;
368 } 428 }
369 429
430 if (!rp) {
431 dprintk("nfsd: unable to allocate DRC entry!\n");
432 goto out;
433 }
434
370 /* 435 /*
371 * We're keeping the one we just allocated. Are we now over the 436 * We're keeping the one we just allocated. Are we now over the
372 * limit? Prune one off the tip of the LRU in trade for the one we 437 * limit? Prune one off the tip of the LRU in trade for the one we
@@ -376,7 +441,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
376 nfsd_reply_cache_free_locked(list_first_entry(&lru_head, 441 nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
377 struct svc_cacherep, c_lru)); 442 struct svc_cacherep, c_lru));
378 443
379setup_entry:
380 nfsdstats.rcmisses++; 444 nfsdstats.rcmisses++;
381 rqstp->rq_cacherep = rp; 445 rqstp->rq_cacherep = rp;
382 rp->c_state = RC_INPROG; 446 rp->c_state = RC_INPROG;
@@ -394,6 +458,7 @@ setup_entry:
394 458
395 /* release any buffer */ 459 /* release any buffer */
396 if (rp->c_type == RC_REPLBUFF) { 460 if (rp->c_type == RC_REPLBUFF) {
461 drc_mem_usage -= rp->c_replvec.iov_len;
397 kfree(rp->c_replvec.iov_base); 462 kfree(rp->c_replvec.iov_base);
398 rp->c_replvec.iov_base = NULL; 463 rp->c_replvec.iov_base = NULL;
399 } 464 }
@@ -462,6 +527,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
462 struct svc_cacherep *rp = rqstp->rq_cacherep; 527 struct svc_cacherep *rp = rqstp->rq_cacherep;
463 struct kvec *resv = &rqstp->rq_res.head[0], *cachv; 528 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
464 int len; 529 int len;
530 size_t bufsize = 0;
465 531
466 if (!rp) 532 if (!rp)
467 return; 533 return;
@@ -483,19 +549,21 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
483 break; 549 break;
484 case RC_REPLBUFF: 550 case RC_REPLBUFF:
485 cachv = &rp->c_replvec; 551 cachv = &rp->c_replvec;
486 cachv->iov_base = kmalloc(len << 2, GFP_KERNEL); 552 bufsize = len << 2;
553 cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
487 if (!cachv->iov_base) { 554 if (!cachv->iov_base) {
488 nfsd_reply_cache_free(rp); 555 nfsd_reply_cache_free(rp);
489 return; 556 return;
490 } 557 }
491 cachv->iov_len = len << 2; 558 cachv->iov_len = bufsize;
492 memcpy(cachv->iov_base, statp, len << 2); 559 memcpy(cachv->iov_base, statp, bufsize);
493 break; 560 break;
494 case RC_NOCACHE: 561 case RC_NOCACHE:
495 nfsd_reply_cache_free(rp); 562 nfsd_reply_cache_free(rp);
496 return; 563 return;
497 } 564 }
498 spin_lock(&cache_lock); 565 spin_lock(&cache_lock);
566 drc_mem_usage += bufsize;
499 lru_put_end(rp); 567 lru_put_end(rp);
500 rp->c_secure = rqstp->rq_secure; 568 rp->c_secure = rqstp->rq_secure;
501 rp->c_type = cachetype; 569 rp->c_type = cachetype;
@@ -523,3 +591,30 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
523 vec->iov_len += data->iov_len; 591 vec->iov_len += data->iov_len;
524 return 1; 592 return 1;
525} 593}
594
595/*
596 * Note that fields may be added, removed or reordered in the future. Programs
597 * scraping this file for info should test the labels to ensure they're
598 * getting the correct field.
599 */
600static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
601{
602 spin_lock(&cache_lock);
603 seq_printf(m, "max entries: %u\n", max_drc_entries);
604 seq_printf(m, "num entries: %u\n", num_drc_entries);
605 seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
606 seq_printf(m, "mem usage: %u\n", drc_mem_usage);
607 seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
608 seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
609 seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
610 seq_printf(m, "payload misses: %u\n", payload_misses);
611 seq_printf(m, "longest chain len: %u\n", longest_chain);
612 seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
613 spin_unlock(&cache_lock);
614 return 0;
615}
616
617int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
618{
619 return single_open(file, nfsd_reply_cache_stats_show, NULL);
620}
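
The seq_file handler above publishes the new counters as a reply_cache_stats file in the nfsd filesystem (registered in nfsctl.c below), one "label: value" pair per line, and the comment warns that fields may be added or reordered, so consumers should key on the label. A hedged example of reading it from user space, assuming the nfsd filesystem is mounted at the usual /proc/fs/nfsd location.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed mount point for the nfsd filesystem; adjust if it lives elsewhere. */
	FILE *f = fopen("/proc/fs/nfsd/reply_cache_stats", "r");
	char line[128];

	if (!f) {
		perror("reply_cache_stats");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* match on the label, not the line position, as the format may change */
		if (strncmp(line, "mem usage:", 10) == 0 ||
		    strncmp(line, "payload misses:", 15) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
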
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 5bee0313dffd..7f555179bf81 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -35,6 +35,7 @@ enum {
35 NFSD_Threads, 35 NFSD_Threads,
36 NFSD_Pool_Threads, 36 NFSD_Pool_Threads,
37 NFSD_Pool_Stats, 37 NFSD_Pool_Stats,
38 NFSD_Reply_Cache_Stats,
38 NFSD_Versions, 39 NFSD_Versions,
39 NFSD_Ports, 40 NFSD_Ports,
40 NFSD_MaxBlkSize, 41 NFSD_MaxBlkSize,
@@ -212,6 +213,13 @@ static const struct file_operations pool_stats_operations = {
212 .owner = THIS_MODULE, 213 .owner = THIS_MODULE,
213}; 214};
214 215
216static struct file_operations reply_cache_stats_operations = {
217 .open = nfsd_reply_cache_stats_open,
218 .read = seq_read,
219 .llseek = seq_lseek,
220 .release = single_release,
221};
222
215/*----------------------------------------------------------------------------*/ 223/*----------------------------------------------------------------------------*/
216/* 224/*
217 * payload - write methods 225 * payload - write methods
@@ -1047,6 +1055,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
1047 [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, 1055 [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
1048 [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR}, 1056 [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR},
1049 [NFSD_Pool_Stats] = {"pool_stats", &pool_stats_operations, S_IRUGO}, 1057 [NFSD_Pool_Stats] = {"pool_stats", &pool_stats_operations, S_IRUGO},
1058 [NFSD_Reply_Cache_Stats] = {"reply_cache_stats", &reply_cache_stats_operations, S_IRUGO},
1050 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, 1059 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
1051 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, 1060 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
1052 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, 1061 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
@@ -1102,8 +1111,10 @@ static int create_proc_exports_entry(void)
1102 return -ENOMEM; 1111 return -ENOMEM;
1103 entry = proc_create("exports", 0, entry, 1112 entry = proc_create("exports", 0, entry,
1104 &exports_proc_operations); 1113 &exports_proc_operations);
1105 if (!entry) 1114 if (!entry) {
1115 remove_proc_entry("fs/nfs", NULL);
1106 return -ENOMEM; 1116 return -ENOMEM;
1117 }
1107 return 0; 1118 return 0;
1108} 1119}
1109#else /* CONFIG_PROC_FS */ 1120#else /* CONFIG_PROC_FS */
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 1a8c7391f7ae..274e2a114e05 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -79,6 +79,8 @@ struct nfs4_stid {
79#define NFS4_DELEG_STID 4 79#define NFS4_DELEG_STID 4
80/* For an open stateid kept around *only* to process close replays: */ 80/* For an open stateid kept around *only* to process close replays: */
81#define NFS4_CLOSED_STID 8 81#define NFS4_CLOSED_STID 8
82/* For a deleg stateid kept around only to process free_stateid's: */
83#define NFS4_REVOKED_DELEG_STID 16
82 unsigned char sc_type; 84 unsigned char sc_type;
83 stateid_t sc_stateid; 85 stateid_t sc_stateid;
84 struct nfs4_client *sc_client; 86 struct nfs4_client *sc_client;
@@ -194,9 +196,11 @@ struct nfsd4_conn {
194}; 196};
195 197
196struct nfsd4_session { 198struct nfsd4_session {
197 struct kref se_ref; 199 atomic_t se_ref;
198 struct list_head se_hash; /* hash by sessionid */ 200 struct list_head se_hash; /* hash by sessionid */
199 struct list_head se_perclnt; 201 struct list_head se_perclnt;
202/* See SESSION4_PERSIST, etc. for standard flags; this is internal-only: */
203#define NFS4_SESSION_DEAD 0x010
200 u32 se_flags; 204 u32 se_flags;
201 struct nfs4_client *se_client; 205 struct nfs4_client *se_client;
202 struct nfs4_sessionid se_sessionid; 206 struct nfs4_sessionid se_sessionid;
@@ -236,6 +240,7 @@ struct nfs4_client {
236 struct list_head cl_openowners; 240 struct list_head cl_openowners;
237 struct idr cl_stateids; /* stateid lookup */ 241 struct idr cl_stateids; /* stateid lookup */
238 struct list_head cl_delegations; 242 struct list_head cl_delegations;
243 struct list_head cl_revoked; /* unacknowledged, revoked 4.1 state */
239 struct list_head cl_lru; /* tail queue */ 244 struct list_head cl_lru; /* tail queue */
240 struct xdr_netobj cl_name; /* id generated by client */ 245 struct xdr_netobj cl_name; /* id generated by client */
241 nfs4_verifier cl_verifier; /* generated by client */ 246 nfs4_verifier cl_verifier; /* generated by client */
@@ -286,18 +291,6 @@ struct nfs4_client {
286 struct net *net; 291 struct net *net;
287}; 292};
288 293
289static inline void
290mark_client_expired(struct nfs4_client *clp)
291{
292 clp->cl_time = 0;
293}
294
295static inline bool
296is_client_expired(struct nfs4_client *clp)
297{
298 return clp->cl_time == 0;
299}
300
301/* struct nfs4_client_reset 294/* struct nfs4_client_reset
302 * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl 295 * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl
303 * upon lease reset, or from upcall to state_daemon (to read in state 296 * upon lease reset, or from upcall to state_daemon (to read in state
@@ -365,7 +358,6 @@ struct nfs4_openowner {
365 struct nfs4_ol_stateid *oo_last_closed_stid; 358 struct nfs4_ol_stateid *oo_last_closed_stid;
366 time_t oo_time; /* time of placement on so_close_lru */ 359 time_t oo_time; /* time of placement on so_close_lru */
367#define NFS4_OO_CONFIRMED 1 360#define NFS4_OO_CONFIRMED 1
368#define NFS4_OO_PURGE_CLOSE 2
369#define NFS4_OO_NEW 4 361#define NFS4_OO_NEW 4
370 unsigned char oo_flags; 362 unsigned char oo_flags;
371}; 363};
@@ -373,7 +365,7 @@ struct nfs4_openowner {
 struct nfs4_lockowner {
 	struct nfs4_stateowner lo_owner; /* must be first element */
 	struct list_head lo_owner_ino_hash; /* hash by owner,file */
-	struct list_head lo_perstateid; /* for lockowners only */
+	struct list_head lo_perstateid;
 	struct list_head lo_list; /* for temporary uses */
 };
 
@@ -390,7 +382,7 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
 /* nfs4_file: a file opened by some number of (open) nfs4_stateowners. */
 struct nfs4_file {
 	atomic_t fi_ref;
-	struct list_head fi_hash; /* hash by "struct inode *" */
+	struct hlist_node fi_hash; /* hash by "struct inode *" */
 	struct list_head fi_stateids;
 	struct list_head fi_delegations;
 	/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
@@ -486,8 +478,7 @@ extern void nfs4_put_delegation(struct nfs4_delegation *dp);
 extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name,
 							struct nfsd_net *nn);
 extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn);
-extern void release_session_client(struct nfsd4_session *);
-extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
+extern void put_client_renew(struct nfs4_client *clp);
 
 /* nfs4recover operations */
 extern int nfsd4_client_tracking_init(struct net *net);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2b2e2396a869..84ce601d8063 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1758,10 +1758,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
 	tdentry = tfhp->fh_dentry;
 	tdir = tdentry->d_inode;
 
-	err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
-	if (ffhp->fh_export != tfhp->fh_export)
-		goto out;
-
 	err = nfserr_perm;
 	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
 		goto out;
@@ -1802,6 +1798,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
 	host_err = -EXDEV;
 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
 		goto out_dput_new;
+	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+		goto out_dput_new;
 
 	host_err = nfsd_break_lease(odentry->d_inode);
 	if (host_err)
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 546f8983ecf1..3b271d2092b6 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -184,7 +184,6 @@ struct nfsd4_lock {
 #define lk_old_lock_stateid v.old.lock_stateid
 #define lk_old_lock_seqid v.old.lock_seqid
 
-#define lk_rflags u.ok.rflags
 #define lk_resp_stateid u.ok.stateid
 #define lk_denied u.denied
 
@@ -237,6 +236,7 @@ struct nfsd4_open {
 	u32 op_share_deny; /* request */
 	u32 op_deleg_want; /* request */
 	stateid_t op_stateid; /* response */
+	__be32 op_xdr_error; /* see nfsd4_open_omfg() */
 	u32 op_recall; /* recall */
 	struct nfsd4_change_info op_cinfo; /* response */
 	u32 op_rflags; /* response */
@@ -623,6 +623,7 @@ extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *, struct nfsd4_test_stateid *test_stateid);
 extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid);
+extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr);
 #endif
 
 /*
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
new file mode 100644
index 000000000000..c5c55dfb91a9
--- /dev/null
+++ b/fs/nfsd/xdr4cb.h
@@ -0,0 +1,23 @@
+#define NFS4_MAXTAGLEN 20
+
+#define NFS4_enc_cb_null_sz 0
+#define NFS4_dec_cb_null_sz 0
+#define cb_compound_enc_hdr_sz 4
+#define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2))
+#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2)
+#define cb_sequence_enc_sz (sessionid_sz + 4 + \
+			1 /* no referring calls list yet */)
+#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4)
+
+#define op_enc_sz 1
+#define op_dec_sz 2
+#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
+#define enc_stateid_sz (NFS4_STATEID_SIZE >> 2)
+#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
+			cb_sequence_enc_sz + \
+			1 + enc_stateid_sz + \
+			enc_nfs4_fh_sz)
+
+#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
+			cb_sequence_dec_sz + \
+			op_dec_sz)